diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index fe6bea9d8d..137fe61320 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -494,16 +494,11 @@ the version in `Cargo.lock`, so the build can no longer continue. To resolve this, we need to update `Cargo.lock`. Luckily, cargo provides a command to do this easily. -First, go into the `src/` directory since that is where `Cargo.toml` is in -the rust repository. Then run, `cargo update -p rustfmt-nightly` to solve -the problem. - ``` -$ cd src $ cargo update -p rustfmt-nightly ``` -This should change the version listed in `src/Cargo.lock` to the new version you updated +This should change the version listed in `Cargo.lock` to the new version you updated the submodule to. Running `./x.py build` should work now. ## Writing Documentation @@ -645,7 +640,7 @@ are: * **Google!** ([search only in Rust Documentation][gsearchdocs] to find types, traits, etc. quickly) * Don't be afraid to ask! The Rust community is friendly and helpful. -[rustc guide]: https://rust-lang-nursery.github.io/rustc-guide/about-this-guide.html +[rustc guide]: https://rust-lang.github.io/rustc-guide/about-this-guide.html [gdfrustc]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc/ [gsearchdocs]: https://www.google.com/search?q=site:doc.rust-lang.org+your+query+here [rif]: http://internals.rust-lang.org @@ -653,5 +648,5 @@ are: [rustforge]: https://forge.rust-lang.org/ [tlgba]: http://tomlee.co/2014/04/a-more-detailed-tour-of-the-rust-compiler/ [ro]: http://www.rustaceans.org/ -[rctd]: https://rust-lang-nursery.github.io/rustc-guide/tests/intro.html +[rctd]: https://rust-lang.github.io/rustc-guide/tests/intro.html [cheatsheet]: https://buildbot2.rust-lang.org/homu/ diff --git a/src/Cargo.lock b/Cargo.lock similarity index 94% rename from src/Cargo.lock rename to Cargo.lock index fed6a3d098..69d929e913 100644 --- a/src/Cargo.lock +++ b/Cargo.lock @@ -15,27 +15,6 @@ dependencies = [ "rand 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "alloc_jemalloc" -version = "0.0.0" -dependencies = [ - "build_helper 0.1.0", - "cc 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", - "compiler_builtins 0.0.0", - "core 0.0.0", - "libc 0.0.0", -] - -[[package]] -name = "alloc_system" -version = "0.0.0" -dependencies = [ - "compiler_builtins 0.0.0", - "core 0.0.0", - "dlmalloc 0.0.0", - "libc 0.0.0", -] - [[package]] name = "ammonia" version = "1.1.0" @@ -198,19 +177,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "cargo" -version = "0.32.0" +version = "0.33.0" dependencies = [ "atty 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", "bufstream 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", "bytesize 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "clap 2.32.0 (registry+https://github.com/rust-lang/crates.io-index)", "core-foundation 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", - "crates-io 0.20.0", - "crossbeam-utils 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", + "crates-io 0.21.0", + "crossbeam-utils 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", "crypto-hash 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "curl 0.4.18 (registry+https://github.com/rust-lang/crates.io-index)", - "curl-sys 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)", - "env_logger 0.5.12 (registry+https://github.com/rust-lang/crates.io-index)", + "curl 0.4.19 
(registry+https://github.com/rust-lang/crates.io-index)", + "curl-sys 0.4.15 (registry+https://github.com/rust-lang/crates.io-index)", + "env_logger 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", "failure 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "filetime 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "flate2 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", @@ -222,6 +201,7 @@ dependencies = [ "hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "home 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "ignore 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "im-rc 12.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "jobserver 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "lazycell 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -232,6 +212,7 @@ dependencies = [ "num_cpus 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)", "opener 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "openssl 0.10.13 (registry+https://github.com/rust-lang/crates.io-index)", + "pretty_env_logger 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", "proptest 0.8.7 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-workspace-hack 1.0.0", "rustfix 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -242,7 +223,7 @@ dependencies = [ "serde_ignored 0.0.4 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.31 (registry+https://github.com/rust-lang/crates.io-index)", "shell-escape 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "tar 0.4.16 (registry+https://github.com/rust-lang/crates.io-index)", + "tar 0.4.19 (registry+https://github.com/rust-lang/crates.io-index)", "tempfile 3.0.3 (registry+https://github.com/rust-lang/crates.io-index)", "termcolor 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", "toml 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -279,7 +260,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "chalk-engine" -version = "0.8.1" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "chalk-macros 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -327,7 +308,7 @@ dependencies = [ "clippy-mini-macro-test 0.2.0", "clippy_dev 0.0.1", "clippy_lints 0.0.212", - "compiletest_rs 0.3.13 (registry+https://github.com/rust-lang/crates.io-index)", + "compiletest_rs 0.3.17 (registry+https://github.com/rust-lang/crates.io-index)", "derive-new 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "regex 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", @@ -362,7 +343,7 @@ dependencies = [ "itertools 0.7.8 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", - "pulldown-cmark 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "pulldown-cmark 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "quine-mc_cluskey 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)", "regex-syntax 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "semver 0.9.0 
(registry+https://github.com/rust-lang/crates.io-index)", @@ -444,7 +425,7 @@ dependencies = [ [[package]] name = "compiletest_rs" -version = "0.3.13" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "diff 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", @@ -453,6 +434,7 @@ dependencies = [ "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", "miow 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.75 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.75 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.31 (registry+https://github.com/rust-lang/crates.io-index)", @@ -483,9 +465,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "crates-io" -version = "0.20.0" +version = "0.21.0" dependencies = [ - "curl 0.4.18 (registry+https://github.com/rust-lang/crates.io-index)", + "curl 0.4.19 (registry+https://github.com/rust-lang/crates.io-index)", "failure 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.75 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.75 (registry+https://github.com/rust-lang/crates.io-index)", @@ -559,6 +541,14 @@ name = "crossbeam-utils" version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "crossbeam-utils" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cfg-if 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "crypto-hash" version = "0.3.1" @@ -572,10 +562,10 @@ dependencies = [ [[package]] name = "curl" -version = "0.4.18" +version = "0.4.19" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "curl-sys 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)", + "curl-sys 0.4.15 (registry+https://github.com/rust-lang/crates.io-index)", "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", "openssl-probe 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -587,7 +577,7 @@ dependencies = [ [[package]] name = "curl-sys" -version = "0.4.13" +version = "0.4.15" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cc 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", @@ -636,6 +626,15 @@ name = "difference" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "directories" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "dlmalloc" version = "0.0.0" @@ -671,6 +670,14 @@ dependencies = [ "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "ena" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "env_logger" version = "0.5.12" @@ -683,6 +690,18 @@ dependencies = [ "termcolor 1.0.2 
(registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "env_logger" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "atty 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", + "humantime 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "termcolor 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "environment" version = "0.1.1" @@ -752,6 +771,7 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", + "libz-sys 1.0.24 (registry+https://github.com/rust-lang/crates.io-index)", "miniz-sys 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -786,6 +806,11 @@ dependencies = [ "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "fs_extra" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "fst" version = "0.3.0" @@ -855,7 +880,7 @@ name = "git2-curl" version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "curl 0.4.18 (registry+https://github.com/rust-lang/crates.io-index)", + "curl 0.4.19 (registry+https://github.com/rust-lang/crates.io-index)", "git2 0.7.5 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", "url 1.7.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -964,6 +989,15 @@ dependencies = [ "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "im-rc" +version = "12.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "rustc_version 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "typenum 1.10.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "installer" version = "0.0.0" @@ -973,7 +1007,7 @@ dependencies = [ "flate2 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "rayon 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", - "tar 0.4.16 (registry+https://github.com/rust-lang/crates.io-index)", + "tar 0.4.19 (registry+https://github.com/rust-lang/crates.io-index)", "walkdir 2.2.5 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "xz2 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", @@ -997,6 +1031,16 @@ name = "itoa" version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "jemalloc-sys" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cc 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", + "fs_extra 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "jobserver" version = "0.1.11" @@ -1085,7 +1129,7 @@ version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cc 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", - "curl-sys 0.4.13 
(registry+https://github.com/rust-lang/crates.io-index)", + "curl-sys 0.4.15 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", "libssh2-sys 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", "libz-sys 1.0.24 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1291,7 +1335,8 @@ dependencies = [ "byteorder 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)", "cargo_metadata 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "colored 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)", - "compiletest_rs 0.3.13 (registry+https://github.com/rust-lang/crates.io-index)", + "compiletest_rs 0.3.17 (registry+https://github.com/rust-lang/crates.io-index)", + "directories 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.5.12 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", "vergen 3.0.3 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1564,6 +1609,17 @@ dependencies = [ "difference 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "pretty_env_logger" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", + "chrono 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", + "env_logger 0.5.12 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "proc-macro2" version = "0.3.8" @@ -1583,12 +1639,6 @@ dependencies = [ [[package]] name = "proc_macro" version = "0.0.0" -dependencies = [ - "rustc_data_structures 0.0.0", - "rustc_errors 0.0.0", - "syntax 0.0.0", - "syntax_pos 0.0.0", -] [[package]] name = "profiler_builtins" @@ -1625,6 +1675,15 @@ dependencies = [ "getopts 0.2.17 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "pulldown-cmark" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "getopts 0.2.17 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "quick-error" version = "1.2.2" @@ -1793,7 +1852,7 @@ dependencies = [ name = "rls" version = "1.31.6" dependencies = [ - "cargo 0.32.0", + "cargo 0.33.0", "cargo_metadata 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "clippy_lints 0.0.212", "crossbeam-channel 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1899,7 +1958,7 @@ dependencies = [ "backtrace 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", "bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", "byteorder 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)", - "chalk-engine 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", + "chalk-engine 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", "flate2 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", "fmt_macros 0.0.0", "graphviz 0.0.0", @@ -1908,7 +1967,6 @@ dependencies = [ "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)", "polonius-engine 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", - "proc_macro 0.0.0", 
"rustc-rayon 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-rayon-core 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_apfloat 0.0.0", @@ -2048,6 +2106,7 @@ dependencies = [ name = "rustc-main" version = "0.0.0" dependencies = [ + "rustc_codegen_ssa 0.0.0", "rustc_driver 0.0.0", "rustc_target 0.0.0", ] @@ -2108,6 +2167,7 @@ version = "0.0.0" dependencies = [ "bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_cratesio_shim 0.0.0", + "smallvec 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2115,7 +2175,6 @@ name = "rustc_asan" version = "0.0.0" dependencies = [ "alloc 0.0.0", - "alloc_system 0.0.0", "build_helper 0.1.0", "cmake 0.1.33 (registry+https://github.com/rust-lang/crates.io-index)", "compiler_builtins 0.0.0", @@ -2147,6 +2206,33 @@ dependencies = [ "rustc_llvm 0.0.0", ] +[[package]] +name = "rustc_codegen_ssa" +version = "0.0.0" +dependencies = [ + "bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", + "jobserver 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", + "memmap 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", + "num_cpus 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc 0.0.0", + "rustc-demangle 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc_allocator 0.0.0", + "rustc_apfloat 0.0.0", + "rustc_codegen_utils 0.0.0", + "rustc_data_structures 0.0.0", + "rustc_errors 0.0.0", + "rustc_fs_util 0.0.0", + "rustc_incremental 0.0.0", + "rustc_mir 0.0.0", + "rustc_target 0.0.0", + "serialize 0.0.0", + "syntax 0.0.0", + "syntax_pos 0.0.0", +] + [[package]] name = "rustc_codegen_utils" version = "0.0.0" @@ -2156,7 +2242,7 @@ dependencies = [ "rustc 0.0.0", "rustc_data_structures 0.0.0", "rustc_incremental 0.0.0", - "rustc_metadata_utils 0.0.0", + "rustc_metadata 0.0.0", "rustc_mir 0.0.0", "rustc_target 0.0.0", "syntax 0.0.0", @@ -2177,11 +2263,10 @@ name = "rustc_data_structures" version = "0.0.0" dependencies = [ "cfg-if 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", - "ena 0.9.3 (registry+https://github.com/rust-lang/crates.io-index)", + "ena 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", "graphviz 0.0.0", "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)", - "parking_lot_core 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-hash 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-rayon 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-rayon-core 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2198,6 +2283,7 @@ dependencies = [ "arena 0.0.0", "env_logger 0.5.12 (registry+https://github.com/rust-lang/crates.io-index)", "graphviz 0.0.0", + "jemalloc-sys 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", "rustc 0.0.0", "rustc-rayon 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2265,6 +2351,7 @@ version = "0.0.0" dependencies = [ "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", "rustc 0.0.0", + "rustc_data_structures 0.0.0", "rustc_mir 
0.0.0", "rustc_target 0.0.0", "syntax 0.0.0", @@ -2284,7 +2371,6 @@ name = "rustc_lsan" version = "0.0.0" dependencies = [ "alloc 0.0.0", - "alloc_system 0.0.0", "build_helper 0.1.0", "cmake 0.1.33 (registry+https://github.com/rust-lang/crates.io-index)", "compiler_builtins 0.0.0", @@ -2297,27 +2383,18 @@ version = "0.0.0" dependencies = [ "flate2 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", - "proc_macro 0.0.0", + "memmap 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "rustc 0.0.0", "rustc_data_structures 0.0.0", "rustc_errors 0.0.0", - "rustc_metadata_utils 0.0.0", "rustc_target 0.0.0", "serialize 0.0.0", + "stable_deref_trait 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "syntax 0.0.0", "syntax_ext 0.0.0", "syntax_pos 0.0.0", ] -[[package]] -name = "rustc_metadata_utils" -version = "0.0.0" -dependencies = [ - "rustc 0.0.0", - "syntax 0.0.0", - "syntax_pos 0.0.0", -] - [[package]] name = "rustc_mir" version = "0.0.0" @@ -2346,7 +2423,6 @@ name = "rustc_msan" version = "0.0.0" dependencies = [ "alloc 0.0.0", - "alloc_system 0.0.0", "build_helper 0.1.0", "cmake 0.1.33 (registry+https://github.com/rust-lang/crates.io-index)", "compiler_builtins 0.0.0", @@ -2431,6 +2507,7 @@ dependencies = [ "bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_cratesio_shim 0.0.0", + "rustc_data_structures 0.0.0", "serialize 0.0.0", ] @@ -2443,11 +2520,12 @@ name = "rustc_traits" version = "0.0.0" dependencies = [ "bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", - "chalk-engine 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", + "chalk-engine 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", "graphviz 0.0.0", "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", "rustc 0.0.0", "rustc_data_structures 0.0.0", + "rustc_target 0.0.0", "smallvec 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", "syntax 0.0.0", "syntax_pos 0.0.0", @@ -2458,7 +2536,6 @@ name = "rustc_tsan" version = "0.0.0" dependencies = [ "alloc 0.0.0", - "alloc_system 0.0.0", "build_helper 0.1.0", "cmake 0.1.33 (registry+https://github.com/rust-lang/crates.io-index)", "compiler_builtins 0.0.0", @@ -2692,12 +2769,11 @@ name = "std" version = "0.0.0" dependencies = [ "alloc 0.0.0", - "alloc_jemalloc 0.0.0", - "alloc_system 0.0.0", "build_helper 0.1.0", "cc 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", "compiler_builtins 0.0.0", "core 0.0.0", + "dlmalloc 0.0.0", "libc 0.0.0", "panic_abort 0.0.0", "panic_unwind 0.0.0", @@ -2841,7 +2917,6 @@ version = "0.0.0" dependencies = [ "fmt_macros 0.0.0", "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", - "proc_macro 0.0.0", "rustc_data_structures 0.0.0", "rustc_errors 0.0.0", "rustc_target 0.0.0", @@ -2864,7 +2939,7 @@ dependencies = [ [[package]] name = "tar" -version = "0.4.16" +version = "0.4.19" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "filetime 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2931,6 +3006,7 @@ name = "test" version = "0.0.0" dependencies = [ "getopts 0.2.17 (registry+https://github.com/rust-lang/crates.io-index)", + "proc_macro 0.0.0", "term 0.0.0", ] @@ -2989,6 +3065,11 @@ dependencies = [ "toml 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "typenum" 
+version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "ucd-util" version = "0.1.1" @@ -3216,7 +3297,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" "checksum cargo_metadata 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7d8dfe3adeb30f7938e6c1dd5327f29235d8ada3e898aeb08c343005ec2915a2" "checksum cc 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)" = "f159dfd43363c4d08055a07703eb7a3406b0dac4d0584d96965a3262db3c9d16" "checksum cfg-if 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "0c4e7bb64a8ebb0d856483e1e682ea3422f883c5f5615a90d51a2c82fe87fdd3" -"checksum chalk-engine 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "9adbe0fe1d6e937c3ee0571739a78f53c1de22f59df616060e868cf13c6c4ce5" +"checksum chalk-engine 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6749eb72e7d4355d944a99f15fbaea701b978c18c5e184a025fcde942b0c9779" "checksum chalk-macros 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "295635afd6853aa9f20baeb7f0204862440c0fe994c5a253d5f479dac41d047e" "checksum chrono 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "6962c635d530328acc53ac6a955e83093fedc91c5809dfac1fa60fa470830a37" "checksum clap 2.32.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b957d88f4b6a63b9d70d5f454ac8011819c6efa7727858f458ab71c756ce2d3e" @@ -3225,7 +3306,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" "checksum colored 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b0aa3473e85a3161b59845d6096b289bb577874cafeaf75ea1b1beaa6572c7fc" "checksum commoncrypto 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d056a8586ba25a1e4d61cb090900e495952c7886786fc55f909ab2f819b69007" "checksum commoncrypto-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1fed34f46747aa73dfaa578069fd8279d2818ade2b55f38f22a9401c7f4083e2" -"checksum compiletest_rs 0.3.13 (registry+https://github.com/rust-lang/crates.io-index)" = "d3064bc712922596dd5ab449fca9261d411893356581fe5297b96aa8f53bb1b8" +"checksum compiletest_rs 0.3.17 (registry+https://github.com/rust-lang/crates.io-index)" = "89747fe073b7838343bd2c2445e7a7c2e0d415598f8925f0fa9205b9cdfc48cb" "checksum core-foundation 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cc3532ec724375c7cb7ff0a097b714fde180bb1f6ed2ab27cfcd99ffca873cd2" "checksum core-foundation-sys 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a3fb15cdbdd9cf8b82d97d0296bb5cd3631bba58d6e31650a002a8e7fb5721f9" "checksum crossbeam 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "24ce9782d4d5c53674646a6a4c1863a21a8fc0cb649b3c94dfc16e45071dea19" @@ -3235,18 +3316,22 @@ source = "registry+https://github.com/rust-lang/crates.io-index" "checksum crossbeam-epoch 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9c90f1474584f38e270b5b613e898c8c328aa4f3dea85e0a27ac2e642f009416" "checksum crossbeam-utils 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "2760899e32a1d58d5abb31129f8fae5de75220bc2176e77ff7c627ae45c918d9" "checksum crossbeam-utils 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "677d453a17e8bd2b913fa38e8b9cf04bcdbb5be790aa294f2389661d72036015" +"checksum crossbeam-utils 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c55913cc2799171a550e307918c0a360e8c16004820291bf3b638969b4a01816" "checksum crypto-hash 0.3.1 
(registry+https://github.com/rust-lang/crates.io-index)" = "09de9ee0fc255ace04c7fa0763c9395a945c37c8292bb554f8d48361d1dcf1b4" -"checksum curl 0.4.18 (registry+https://github.com/rust-lang/crates.io-index)" = "a9e5285b49b44401518c947d3b808d14d99a538a6c9ffb3ec0205c11f9fc4389" -"checksum curl-sys 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)" = "08459503c415173da1ce6b41036a37b8bfdd86af46d45abb9964d4c61fe670ef" +"checksum curl 0.4.19 (registry+https://github.com/rust-lang/crates.io-index)" = "c7c9d851c825e0c033979d4516c9173bc19a78a96eb4d6ae51d4045440eafa16" +"checksum curl-sys 0.4.15 (registry+https://github.com/rust-lang/crates.io-index)" = "721c204978be2143fab0a84b708c49d79d1f6100b8785610f456043a90708870" "checksum datafrog 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "16d724bf4ffe77cdceeecd461009b5f8d9e23c5d645d68bedb4586bf43e7e142" "checksum derive-new 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)" = "ceed73957c449214f8440eec8ad7fa282b67dc9eacbb24a3085b15d60397a17a" "checksum derive_more 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3f57d78cf3bd45270dad4e70c21ec77a960b36c7a841ff9db76aaa775a8fb871" "checksum diff 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "3c2b69f912779fbb121ceb775d74d51e915af17aaebc38d28a592843a2dd0a3a" "checksum difference 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "524cbf6897b527295dff137cec09ecf3a05f4fddffd7dfcd1585403449e74198" +"checksum directories 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "72d337a64190607d4fcca2cb78982c5dd57f4916e19696b48a575fa746b6cb0f" "checksum either 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3be565ca5c557d7f59e7cfcf1844f9e3033650c929c6566f511e8005f205c1d0" "checksum elasticlunr-rs 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "4837d77a1e157489a3933b743fd774ae75074e0e390b2b7f071530048a0d87ee" +"checksum ena 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f56c93cc076508c549d9bb747f79aa9b4eb098be7b8cad8830c3137ef52d1e00" "checksum ena 0.9.3 (registry+https://github.com/rust-lang/crates.io-index)" = "88dc8393b3c7352f94092497f6b52019643e493b6b890eb417cdb7c46117e621" "checksum env_logger 0.5.12 (registry+https://github.com/rust-lang/crates.io-index)" = "f4d7e69c283751083d53d01eac767407343b8b69c4bd70058e08adc2637cb257" +"checksum env_logger 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "afb070faf94c85d17d50ca44f6ad076bce18ae92f0037d350947240a36e9d42e" "checksum environment 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1f4b14e20978669064c33b4c1e0fb4083412e40fe56cbea2eae80fd7591503ee" "checksum error-chain 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ff511d5dc435d703f4971bc399647c9bc38e20cb41452e3b9feb4765419ed3f3" "checksum error-chain 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)" = "07e791d3be96241c77c43846b665ef1384606da2cd2a48730abe606a12906e02" @@ -3259,6 +3344,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" "checksum foreign-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" "checksum foreign-types-shared 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" "checksum fs2 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = 
"9564fc758e15025b46aa6643b1b77d047d1a56a1aea6e01002ac0c7026876213" +"checksum fs_extra 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5f2a4a2034423744d2cc7ca2068453168dcdb82c438419e639a26bd87839c674" "checksum fst 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d94485a00b1827b861dd9d1a2cc9764f9044d4c535514c0760a5a2012ef3399f" "checksum fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82" "checksum fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" @@ -3278,9 +3364,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" "checksum idna 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "38f09e0f0b1fb55fdee1f17470ad800da77af5186a1a76c026b679358b7e844e" "checksum if_chain 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "4bac95d9aa0624e7b78187d6fb8ab012b41d9f6f54b1bcb61e61c4845f8357ec" "checksum ignore 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3e9faa7c84064f07b40da27044af629f578bc7994b650d3e458d0c29183c1d91" +"checksum im-rc 12.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d4591152fd573cf453a890b5f9fdc5c328a751a0785539316739d5f85e5c468c" "checksum is-match 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7e5b386aef33a1c677be65237cb9d32c3f3ef56bd035949710c4bb13083eb053" "checksum itertools 0.7.8 (registry+https://github.com/rust-lang/crates.io-index)" = "f58856976b776fedd95533137617a02fb25719f40e7d9b01c7043cd65474f450" "checksum itoa 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "1306f3464951f30e30d12373d31c79fbd52d236e5e896fd92f96ec7babbbe60b" +"checksum jemalloc-sys 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "bfc62c8e50e381768ce8ee0428ee53741929f7ebd73e4d83f669bcf7693e00ae" "checksum jobserver 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "60af5f849e1981434e4a31d3d782c4774ae9b434ce55b101a96ecfd09147e8be" "checksum json 0.11.13 (registry+https://github.com/rust-lang/crates.io-index)" = "9ad0485404155f45cce53a40d4b2d6ac356418300daed05273d9e26f91c390be" "checksum jsonrpc-core 8.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ddf83704f4e79979a424d1082dd2c1e52683058056c9280efa19ac5f6bc9033c" @@ -3342,10 +3430,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" "checksum polonius-engine 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a5b6b0a7f5f4278b991ffd14abce1d01b013121ad297460237ef0a2f08d43201" "checksum precomputed-hash 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c" "checksum pretty_assertions 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3a029430f0d744bc3d15dd474d591bed2402b645d024583082b9f63bb936dac6" +"checksum pretty_env_logger 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)" = "ed8d1e63042e889b85228620629b51c011d380eed2c7e0015f8a644def280c28" "checksum proc-macro2 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "1b06e2f335f48d24442b35a19df506a835fb3547bc3c06ef27340da9acf5cae7" "checksum proc-macro2 0.4.24 (registry+https://github.com/rust-lang/crates.io-index)" = "77619697826f31a02ae974457af0b29b723e5619e113e9397b8b82c6bd253f09" "checksum proptest 0.8.7 (registry+https://github.com/rust-lang/crates.io-index)" = 
"926d0604475349f463fe44130aae73f2294b5309ab2ca0310b998bd334ef191f" "checksum pulldown-cmark 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "d6fdf85cda6cadfae5428a54661d431330b312bc767ddbc57adbedc24da66e32" +"checksum pulldown-cmark 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "eef52fac62d0ea7b9b4dc7da092aa64ea7ec3d90af6679422d3d7e0e14b6ee15" "checksum quick-error 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9274b940887ce9addde99c4eee6b5c44cc494b182b97e73dc8ffdcb3397fd3f0" "checksum quine-mc_cluskey 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)" = "07589615d719a60c8dd8a4622e7946465dfef20d1a428f969e3443e7386d5f45" "checksum quote 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6e920b65c65f10b2ae65c831a81a073a89edd28c7cce89475bff467ab4167a" @@ -3416,7 +3506,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" "checksum syn 0.15.21 (registry+https://github.com/rust-lang/crates.io-index)" = "816b7af21405b011a23554ea2dc3f6576dc86ca557047c34098c1d741f10f823" "checksum synom 0.11.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a393066ed9010ebaed60b9eafa373d4b1baac186dd7e008555b0f702b51945b6" "checksum synstructure 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "85bb9b7550d063ea184027c9b8c20ac167cd36d3e06b3a40bceb9d746dc1a7b7" -"checksum tar 0.4.16 (registry+https://github.com/rust-lang/crates.io-index)" = "e8f41ca4a5689f06998f0247fcb60da6c760f1950cc9df2a10d71575ad0b062a" +"checksum tar 0.4.19 (registry+https://github.com/rust-lang/crates.io-index)" = "69e16840a1e0a1f1a880b739ef1cc6a4b85496c99b8aa786ccffce6e0c15624c" "checksum tempfile 3.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "c4b103c6d08d323b92ff42c8ce62abcd83ca8efa7fd5bf7927efefec75f58c76" "checksum tendril 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9de21546595a0873061940d994bbbc5c35f024ae4fd61ec5c5b159115684f508" "checksum term 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "5e6b677dd1e8214ea1ef4297f85dbcbed8e8cdddb561040cc998ca2551c37561" @@ -3427,6 +3517,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" "checksum time 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)" = "d825be0eb33fda1a7e68012d51e9c7f451dc1a69391e7fdc197060bb8c56667b" "checksum toml 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "a0263c6c02c4db6c8f7681f9fd35e90de799ebd4cfdeab77a38f4ff6b3d8c0d9" "checksum toml-query 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6854664bfc6df0360c695480836ee90e2d0c965f06db291d10be9344792d43e8" +"checksum typenum 1.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "612d636f949607bdf9b123b4a6f6d966dedf3ff669f7f045890d3a4a73948169" "checksum ucd-util 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "fd2be2d6639d0f8fe6cdda291ad456e23629558d466e2789d2c3e9892bda285d" "checksum unicode-bidi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "49f2bd0c6468a8230e1db229cff8029217cf623c767ea5d60bfbd42729ea54d5" "checksum unicode-normalization 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "6a0180bc61fc5a987082bfa111f4cc95c4caff7f9799f3e46df09163a937aa25" diff --git a/src/Cargo.toml b/Cargo.toml similarity index 51% rename from src/Cargo.toml rename to Cargo.toml index ee997a6ca4..b763caa97d 100644 --- a/src/Cargo.toml +++ b/Cargo.toml @@ -1,31 +1,34 @@ [workspace] members = [ - "bootstrap", - "rustc", - "libstd", - 
"libtest", - "librustc_codegen_llvm", - "tools/cargotest", - "tools/clippy", - "tools/compiletest", - "tools/error_index_generator", - "tools/linkchecker", - "tools/rustbook", - "tools/unstable-book-gen", - "tools/tidy", - "tools/build-manifest", - "tools/remote-test-client", - "tools/remote-test-server", - "tools/rust-installer", - "tools/cargo", - "tools/rustdoc", - "tools/rls", - "tools/rustfmt", - "tools/miri", - "tools/rustdoc-themes", + "src/bootstrap", + "src/rustc", + "src/libstd", + "src/libtest", + "src/librustc_codegen_llvm", + "src/tools/cargotest", + "src/tools/clippy", + "src/tools/compiletest", + "src/tools/error_index_generator", + "src/tools/linkchecker", + "src/tools/rustbook", + "src/tools/unstable-book-gen", + "src/tools/tidy", + "src/tools/build-manifest", + "src/tools/remote-test-client", + "src/tools/remote-test-server", + "src/tools/rust-installer", + "src/tools/cargo", + "src/tools/rustdoc", + "src/tools/rls", + "src/tools/rustfmt", + "src/tools/miri", + "src/tools/rustdoc-themes", ] exclude = [ - "tools/rls/test_data", + "src/tools/rls/test_data", + "build", + # HACK(eddyb) This hardcodes the fact that our CI uses `/checkout/obj`. + "obj", ] # Curiously, LLVM 7.0 will segfault if compiled with opt-level=3 @@ -50,18 +53,18 @@ debug-assertions = false # so we use a `[patch]` here to override the github repository with our local # vendored copy. [patch."https://github.com/rust-lang/cargo"] -cargo = { path = "tools/cargo" } +cargo = { path = "src/tools/cargo" } [patch.crates-io] # Similar to Cargo above we want the RLS to use a vendored version of `rustfmt` # that we're shipping as well (to ensure that the rustfmt in RLS and the # `rustfmt` executable are the same exact version). -rustfmt-nightly = { path = "tools/rustfmt" } +rustfmt-nightly = { path = "src/tools/rustfmt" } -# See comments in `tools/rustc-workspace-hack/README.md` for what's going on +# See comments in `src/tools/rustc-workspace-hack/README.md` for what's going on # here -rustc-workspace-hack = { path = 'tools/rustc-workspace-hack' } +rustc-workspace-hack = { path = 'src/tools/rustc-workspace-hack' } [patch."https://github.com/rust-lang/rust-clippy"] -clippy_lints = { path = "tools/clippy/clippy_lints" } -rustc_tools_util = { path = "tools/clippy/rustc_tools_util" } +clippy_lints = { path = "src/tools/clippy/clippy_lints" } +rustc_tools_util = { path = "src/tools/clippy/rustc_tools_util" } diff --git a/README.md b/README.md index 0e5b7170bc..37442661bc 100644 --- a/README.md +++ b/README.md @@ -233,7 +233,7 @@ Also, you may find the [rustdocs for the compiler itself][rustdocs] useful. 
[IRC]: https://en.wikipedia.org/wiki/Internet_Relay_Chat [#rust]: irc://irc.mozilla.org/rust [#rust-beginners]: irc://irc.mozilla.org/rust-beginners -[rustc guide]: https://rust-lang-nursery.github.io/rustc-guide/about-this-guide.html +[rustc guide]: https://rust-lang.github.io/rustc-guide/about-this-guide.html [rustdocs]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc/ ## License diff --git a/RELEASES.md b/RELEASES.md index db4f6aaa77..da09af3edf 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -183,7 +183,7 @@ Misc [cargo/5877]: https://github.com/rust-lang/cargo/pull/5877/ [cargo/5878]: https://github.com/rust-lang/cargo/pull/5878/ [cargo/5995]: https://github.com/rust-lang/cargo/pull/5995/ -[proc-macros]: https://doc.rust-lang.org/stable/book/2018-edition/ch19-06-macros.html +[proc-macros]: https://doc.rust-lang.org/nightly/book/2018-edition/ch19-06-macros.html [`Ipv4Addr::BROADCAST`]: https://doc.rust-lang.org/nightly/std/net/struct.Ipv4Addr.html#associatedconstant.BROADCAST [`Ipv4Addr::LOCALHOST`]: https://doc.rust-lang.org/nightly/std/net/struct.Ipv4Addr.html#associatedconstant.LOCALHOST diff --git a/config.toml.example b/config.toml.example index e8cb0cba6b..f75e220de4 100644 --- a/config.toml.example +++ b/config.toml.example @@ -277,6 +277,10 @@ # compiler. #codegen-units = 1 +# Sets the number of codegen units to build the standard library with, +# regardless of what the codegen-unit setting for the rest of the compiler is. +#codegen-units-std = 1 + # Whether or not debug assertions are enabled for the compiler and standard # library. Also enables compilation of debug! and trace! logging macros. #debug-assertions = false @@ -296,12 +300,6 @@ # Adding debuginfo makes them several times larger. #debuginfo-tools = false -# Whether or not jemalloc is built and enabled -#use-jemalloc = true - -# Whether or not jemalloc is built with its debug option set -#debug-jemalloc = false - # Whether or not `panic!`s generate backtraces (RUST_BACKTRACE) #backtrace = true @@ -398,6 +396,15 @@ # generally only set for releases #remap-debuginfo = false +# Link the compiler against `jemalloc`, where on Linux and OSX it should +# override the default allocator for rustc and LLVM. +#jemalloc = false + +# Run tests in various test suites with the "nll compare mode" in addition to +# running the tests in normal mode. Largely only used on CI and during local +# development of NLL +#test-compare-mode = false + # ============================================================================= # Options for specific targets # @@ -437,10 +444,6 @@ # not, you can specify an explicit file name for it. #llvm-filecheck = "/path/to/FileCheck" -# Path to the custom jemalloc static library to link into the standard library -# by default. This is only used if jemalloc is still enabled above -#jemalloc = "/path/to/jemalloc/libjemalloc_pic.a" - # If this target is for Android, this option will be required to specify where # the NDK for the target lives. This is used to find the C compiler to link and # build native code. 
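
The `codegen-units-std` knob introduced in `config.toml.example` above is consumed later in this patch, in `src/bootstrap/builder.rs`, where it takes precedence over plain `codegen-units` when building libstd/libtest. The sketch below is only an illustration of that precedence with simplified, hypothetical names (`Mode`, `codegen_units`); it is not code from this patch.

```rust
// Precedence sketch: for the Std/Test build modes, `codegen-units-std`
// (if set) wins; everything else falls back to `codegen-units`; if neither
// is set, RUSTC_CODEGEN_UNITS is left unset.
enum Mode { Std, Test, Rustc }

fn codegen_units(mode: Mode, std_units: Option<u32>, units: Option<u32>) -> Option<u32> {
    match (mode, std_units, units) {
        (Mode::Std, Some(n), _) |
        (Mode::Test, Some(n), _) |
        (_, _, Some(n)) => Some(n),
        _ => None, // don't set anything
    }
}

fn main() {
    // codegen-units-std = 1, codegen-units = 16
    assert_eq!(codegen_units(Mode::Std, Some(1), Some(16)), Some(1));
    assert_eq!(codegen_units(Mode::Test, Some(1), None), Some(1));
    assert_eq!(codegen_units(Mode::Rustc, Some(1), Some(16)), Some(16));
    // neither option set: nothing exported to cargo
    assert_eq!(codegen_units(Mode::Rustc, None, None), None);
}
```
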
diff --git a/git-commit-hash b/git-commit-hash index b09edf56f5..3d87efb416 100644 --- a/git-commit-hash +++ b/git-commit-hash @@ -1 +1 @@ -abe02cefd6cd1916df62ad7dc80161bea50b72e8 \ No newline at end of file +a01e4761a1507939430d4044a5f0a35fdb2b146c \ No newline at end of file diff --git a/src/README.md b/src/README.md index 6da4944c39..6522891586 100644 --- a/src/README.md +++ b/src/README.md @@ -5,11 +5,11 @@ This directory contains the source code of the rust project, including: For more information on how various parts of the compiler work, see the [rustc guide]. -Their is also useful content in the following READMEs, which are gradually being moved over to the guide: +There is also useful content in the following READMEs, which are gradually being moved over to the guide: - https://github.com/rust-lang/rust/tree/master/src/librustc/ty/query - https://github.com/rust-lang/rust/tree/master/src/librustc/dep_graph - https://github.com/rust-lang/rust/blob/master/src/librustc/infer/region_constraints - https://github.com/rust-lang/rust/tree/master/src/librustc/infer/higher_ranked - https://github.com/rust-lang/rust/tree/master/src/librustc/infer/lexical_region_resolve -[rustc guide]: https://rust-lang-nursery.github.io/rustc-guide/about-this-guide.html +[rustc guide]: https://rust-lang.github.io/rustc-guide/about-this-guide.html diff --git a/src/bootstrap/bin/rustc.rs b/src/bootstrap/bin/rustc.rs index b6764c1aae..d18a48e5d2 100644 --- a/src/bootstrap/bin/rustc.rs +++ b/src/bootstrap/bin/rustc.rs @@ -129,10 +129,12 @@ fn main() { // Help the libc crate compile by assisting it in finding the MUSL // native libraries. if let Some(s) = env::var_os("MUSL_ROOT") { - let mut root = OsString::from("native="); - root.push(&s); - root.push("/lib"); - cmd.arg("-L").arg(&root); + if target.contains("musl") { + let mut root = OsString::from("native="); + root.push(&s); + root.push("/lib"); + cmd.arg("-L").arg(&root); + } } // Override linker if necessary. @@ -232,7 +234,9 @@ fn main() { // flesh out rpath support more fully in the future. cmd.arg("-Z").arg("osx-rpath-install-name"); Some("-Wl,-rpath,@loader_path/../lib") - } else if !target.contains("windows") && !target.contains("wasm32") { + } else if !target.contains("windows") && + !target.contains("wasm32") && + !target.contains("fuchsia") { Some("-Wl,-rpath,$ORIGIN/../lib") } else { None @@ -253,8 +257,15 @@ fn main() { // When running miri tests, we need to generate MIR for all libraries if env::var("TEST_MIRI").ok().map_or(false, |val| val == "true") { + // The flags here should be kept in sync with `add_miri_default_args` + // in miri's `src/lib.rs`. cmd.arg("-Zalways-encode-mir"); - cmd.arg("-Zmir-emit-validate=1"); + // These options are preferred by miri, to be able to perform better validation, + // but the bootstrap compiler might not understand them. 
+ if stage != "0" { + cmd.arg("-Zmir-emit-retag"); + cmd.arg("-Zmir-opt-level=0"); + } } // Force all crates compiled by this compiler to (a) be unstable and (b) diff --git a/src/bootstrap/bootstrap.py b/src/bootstrap/bootstrap.py index b5dc0090c8..d143dffb24 100644 --- a/src/bootstrap/bootstrap.py +++ b/src/bootstrap/bootstrap.py @@ -79,8 +79,8 @@ def _download(path, url, probably_big, verbose, exception): # see http://serverfault.com/questions/301128/how-to-download if sys.platform == 'win32': run(["PowerShell.exe", "/nologo", "-Command", - "(New-Object System.Net.WebClient)" - ".DownloadFile('{}', '{}')".format(url, path)], + "[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12;", + "(New-Object System.Net.WebClient).DownloadFile('{}', '{}')".format(url, path)], verbose=verbose, exception=exception) else: @@ -715,11 +715,6 @@ class RustBuild(object): backends = self.get_toml('codegen-backends') if backends is None or not 'emscripten' in backends: continue - if module.endswith("jemalloc"): - if self.get_toml('use-jemalloc') == 'false': - continue - if self.get_toml('jemalloc'): - continue if module.endswith("lld"): config = self.get_toml('lld') if config is None or config == 'false': @@ -806,7 +801,7 @@ def bootstrap(help_triggered): registry = 'https://example.com' [source.vendored-sources] - directory = '{}/src/vendor' + directory = '{}/vendor' """.format(build.rust_root)) else: if os.path.exists('.cargo'): diff --git a/src/bootstrap/builder.rs b/src/bootstrap/builder.rs index 02e9ca9250..98005f9396 100644 --- a/src/bootstrap/builder.rs +++ b/src/bootstrap/builder.rs @@ -390,7 +390,6 @@ impl<'a> Builder<'a> { test::RunPassFullDeps, test::RunFailFullDeps, test::CompileFailFullDeps, - test::IncrementalFullDeps, test::Rustdoc, test::Pretty, test::RunPassPretty, @@ -714,7 +713,7 @@ impl<'a> Builder<'a> { "build" => self.cargo_out(compiler, mode, target), // This is the intended out directory for crate documentation. - "doc" => self.crate_doc_out(target), + "doc" | "rustdoc" => self.crate_doc_out(target), _ => self.stage_out(compiler, mode), }; @@ -743,7 +742,7 @@ impl<'a> Builder<'a> { _ => compile::librustc_stamp(self, cmp, target), }; - if cmd == "doc" { + if cmd == "doc" || cmd == "rustdoc" { if mode == Mode::Rustc || mode == Mode::ToolRustc || mode == Mode::Codegen { // This is the intended out directory for compiler documentation. my_out = self.compiler_doc_out(target); @@ -883,7 +882,7 @@ impl<'a> Builder<'a> { .env("RUSTDOC", self.out.join("bootstrap/debug/rustdoc")) .env( "RUSTDOC_REAL", - if cmd == "doc" || (cmd == "test" && want_rustdoc) { + if cmd == "doc" || cmd == "rustdoc" || (cmd == "test" && want_rustdoc) { self.rustdoc(compiler.host) } else { PathBuf::from("/path/to/nowhere/rustdoc/not/required") @@ -1120,10 +1119,15 @@ impl<'a> Builder<'a> { cargo.arg("-v"); } - // This must be kept before the thinlto check, as we set codegen units - // to 1 forcibly there. 
- if let Some(n) = self.config.rust_codegen_units { - cargo.env("RUSTC_CODEGEN_UNITS", n.to_string()); + match (mode, self.config.rust_codegen_units_std, self.config.rust_codegen_units) { + (Mode::Std, Some(n), _) | + (Mode::Test, Some(n), _) | + (_, _, Some(n)) => { + cargo.env("RUSTC_CODEGEN_UNITS", n.to_string()); + } + _ => { + // Don't set anything + } } if self.config.rust_optimize { diff --git a/src/bootstrap/channel.rs b/src/bootstrap/channel.rs index 91bec69cfa..88b6925b2b 100644 --- a/src/bootstrap/channel.rs +++ b/src/bootstrap/channel.rs @@ -24,7 +24,7 @@ use Build; use config::Config; // The version number -pub const CFG_RELEASE_NUM: &str = "1.31.0"; +pub const CFG_RELEASE_NUM: &str = "1.32.0"; pub struct GitInfo { inner: Option, diff --git a/src/bootstrap/compile.rs b/src/bootstrap/compile.rs index 69d45acded..b6bb11d07e 100644 --- a/src/bootstrap/compile.rs +++ b/src/bootstrap/compile.rs @@ -22,7 +22,7 @@ use std::fs::{self, File}; use std::io::BufReader; use std::io::prelude::*; use std::path::{Path, PathBuf}; -use std::process::{Command, Stdio}; +use std::process::{Command, Stdio, exit}; use std::str; use build_helper::{output, mtime, up_to_date}; @@ -69,7 +69,7 @@ impl Step for Std { if builder.config.keep_stage.contains(&compiler.stage) { builder.info("Warning: Using a potentially old libstd. This may not behave well."); builder.ensure(StdLink { - compiler: compiler, + compiler, target_compiler: compiler, target, }); @@ -158,16 +158,7 @@ pub fn std_cargo(builder: &Builder, .arg("--manifest-path") .arg(builder.src.join("src/rustc/compiler_builtins_shim/Cargo.toml")); } else { - let mut features = builder.std_features(); - - // When doing a local rebuild we tell cargo that we're stage1 rather than - // stage0. This works fine if the local rust and being-built rust have the - // same view of what the default allocator is, but fails otherwise. Since - // we don't have a way to express an allocator preference yet, work - // around the issue in the case of a local rebuild with jemalloc disabled. - if compiler.stage == 0 && builder.local_rebuild && !builder.config.use_jemalloc { - features.push_str(" force_alloc_system"); - } + let features = builder.std_features(); if compiler.stage != 0 && builder.config.sanitizers { // This variable is used by the sanitizer runtime crates, e.g. @@ -188,11 +179,6 @@ pub fn std_cargo(builder: &Builder, .arg("--manifest-path") .arg(builder.src.join("src/libstd/Cargo.toml")); - if let Some(target) = builder.config.target_config.get(&target) { - if let Some(ref jemalloc) = target.jemalloc { - cargo.env("JEMALLOC_OVERRIDE", jemalloc); - } - } if target.contains("musl") { if let Some(p) = builder.musl_root(target) { cargo.env("MUSL_ROOT", p); @@ -217,7 +203,7 @@ impl Step for StdLink { /// Link all libstd rlibs/dylibs into the sysroot location. /// - /// Links those artifacts generated by `compiler` to a the `stage` compiler's + /// Links those artifacts generated by `compiler` to the `stage` compiler's /// sysroot for the specified `host` and `target`. /// /// Note that this assumes that `compiler` has already generated the libstd @@ -358,7 +344,7 @@ impl Step for Test { if builder.config.keep_stage.contains(&compiler.stage) { builder.info("Warning: Using a potentially old libtest. 
This may not behave well."); builder.ensure(TestLink { - compiler: compiler, + compiler, target_compiler: compiler, target, }); @@ -480,7 +466,7 @@ impl Step for Rustc { if builder.config.keep_stage.contains(&compiler.stage) { builder.info("Warning: Using a potentially old librustc. This may not behave well."); builder.ensure(RustcLink { - compiler: compiler, + compiler, target_compiler: compiler, target, }); @@ -750,7 +736,7 @@ pub fn build_codegen_backend(builder: &Builder, // Pass down configuration from the LLVM build into the build of // librustc_llvm and librustc_codegen_llvm. - if builder.is_rust_llvm(target) { + if builder.is_rust_llvm(target) && backend != "emscripten" { cargo.env("LLVM_RUSTLLVM", "1"); } cargo.env("LLVM_CONFIG", &llvm_config); @@ -816,8 +802,8 @@ fn copy_codegen_backends_to_sysroot(builder: &Builder, let filename = file.file_name().unwrap().to_str().unwrap(); // change `librustc_codegen_llvm-xxxxxx.so` to `librustc_codegen_llvm-llvm.so` let target_filename = { - let dash = filename.find("-").unwrap(); - let dot = filename.find(".").unwrap(); + let dash = filename.find('-').unwrap(); + let dot = filename.find('.').unwrap(); format!("{}-{}{}", &filename[..dash], backend, @@ -1112,7 +1098,7 @@ pub fn run_cargo(builder: &Builder, }); if !ok { - panic!("cargo must succeed"); + exit(1); } // Ok now we need to actually find all the files listed in `toplevel`. We've diff --git a/src/bootstrap/config.rs b/src/bootstrap/config.rs index a9d330e06a..8fc2d5787c 100644 --- a/src/bootstrap/config.rs +++ b/src/bootstrap/config.rs @@ -58,6 +58,7 @@ pub struct Config { pub ignore_git: bool, pub exclude: Vec, pub rustc_error_format: Option, + pub test_compare_mode: bool, pub run_host_only: bool, @@ -95,6 +96,7 @@ pub struct Config { // rust codegen options pub rust_optimize: bool, pub rust_codegen_units: Option, + pub rust_codegen_units_std: Option, pub rust_debug_assertions: bool, pub rust_debuginfo: bool, pub rust_debuginfo_lines: bool, @@ -115,6 +117,7 @@ pub struct Config { pub hosts: Vec>, pub targets: Vec>, pub local_rebuild: bool, + pub jemalloc: bool, // dist misc pub dist_sign_folder: Option, @@ -122,8 +125,6 @@ pub struct Config { pub dist_gpg_password_file: Option, // libstd features - pub debug_jemalloc: bool, - pub use_jemalloc: bool, pub backtrace: bool, // support for RUST_BACKTRACE pub wasm_syscall: bool, @@ -165,7 +166,6 @@ pub struct Target { pub llvm_config: Option, /// Some(path to FileCheck) if one was specified. pub llvm_filecheck: Option, - pub jemalloc: Option, pub cc: Option, pub cxx: Option, pub ar: Option, @@ -262,7 +262,7 @@ struct Llvm { link_jobs: Option, link_shared: Option, version_suffix: Option, - clang_cl: Option + clang_cl: Option, } #[derive(Deserialize, Default, Clone)] @@ -294,14 +294,13 @@ impl Default for StringOrBool { struct Rust { optimize: Option, codegen_units: Option, + codegen_units_std: Option, debug_assertions: Option, debuginfo: Option, debuginfo_lines: Option, debuginfo_only_std: Option, debuginfo_tools: Option, experimental_parallel_queries: Option, - debug_jemalloc: Option, - use_jemalloc: Option, backtrace: Option, default_linker: Option, channel: Option, @@ -327,6 +326,8 @@ struct Rust { backtrace_on_ice: Option, verify_llvm_ir: Option, remap_debuginfo: Option, + jemalloc: Option, + test_compare_mode: Option, } /// TOML representation of how each build target is configured. 
@@ -335,7 +336,6 @@ struct Rust { struct TomlTarget { llvm_config: Option, llvm_filecheck: Option, - jemalloc: Option, cc: Option, cxx: Option, ar: Option, @@ -361,7 +361,6 @@ impl Config { config.llvm_enabled = true; config.llvm_optimize = true; config.llvm_version_check = true; - config.use_jemalloc = true; config.backtrace = true; config.rust_optimize = true; config.rust_optimize_tests = true; @@ -497,7 +496,6 @@ impl Config { let mut debuginfo_only_std = None; let mut debuginfo_tools = None; let mut debug = None; - let mut debug_jemalloc = None; let mut debuginfo = None; let mut debug_assertions = None; let mut optimize = None; @@ -539,12 +537,12 @@ impl Config { debuginfo_tools = rust.debuginfo_tools; optimize = rust.optimize; ignore_git = rust.ignore_git; - debug_jemalloc = rust.debug_jemalloc; set(&mut config.rust_optimize_tests, rust.optimize_tests); set(&mut config.rust_debuginfo_tests, rust.debuginfo_tests); set(&mut config.codegen_tests, rust.codegen_tests); set(&mut config.rust_rpath, rust.rpath); - set(&mut config.use_jemalloc, rust.use_jemalloc); + set(&mut config.jemalloc, rust.jemalloc); + set(&mut config.test_compare_mode, rust.test_compare_mode); set(&mut config.backtrace, rust.backtrace); set(&mut config.channel, rust.channel.clone()); set(&mut config.rust_dist_src, rust.dist_src); @@ -580,6 +578,8 @@ impl Config { Some(n) => config.rust_codegen_units = Some(n), None => {} } + + config.rust_codegen_units_std = rust.codegen_units_std; } if let Some(ref t) = toml.target { @@ -592,9 +592,6 @@ impl Config { if let Some(ref s) = cfg.llvm_filecheck { target.llvm_filecheck = Some(config.src.join(s)); } - if let Some(ref s) = cfg.jemalloc { - target.jemalloc = Some(config.src.join(s)); - } if let Some(ref s) = cfg.android_ndk { target.ndk = Some(config.src.join(s)); } @@ -640,7 +637,6 @@ impl Config { config.rust_debuginfo_tools = debuginfo_tools.unwrap_or(false); let default = debug == Some(true); - config.debug_jemalloc = debug_jemalloc.unwrap_or(default); config.rust_debuginfo = debuginfo.unwrap_or(default); config.rust_debug_assertions = debug_assertions.unwrap_or(default); diff --git a/src/bootstrap/configure.py b/src/bootstrap/configure.py index 0cf84a6298..5467c9f9d5 100755 --- a/src/bootstrap/configure.py +++ b/src/bootstrap/configure.py @@ -40,7 +40,7 @@ def v(*args): options.append(Option(*args, value=True)) -o("debug", "rust.debug", "debug mode; disables optimization unless `--enable-optimize` given") +o("debug", "rust.debug", "enables debugging environment; does not affect optimization of bootstrapped code (use `--disable-optimize` for that)") o("docs", "build.docs", "build standard library documentation") o("compiler-docs", "build.compiler-docs", "build compiler documentation") o("optimize-tests", "rust.optimize-tests", "build tests with optimizations") @@ -68,6 +68,7 @@ o("cargo-native-static", "build.cargo-native-static", "static native libraries i o("profiler", "build.profiler", "build the profiler runtime") o("emscripten", None, "compile the emscripten backend as well as LLVM") o("full-tools", None, "enable all tools") +o("lld", "rust.lld", "build lld") o("lldb", "rust.lldb", "build lldb") o("missing-tools", "dist.missing-tools", "allow failures when building tools") @@ -82,7 +83,6 @@ o("debuginfo", "rust.debuginfo", "build with debugger metadata") o("debuginfo-lines", "rust.debuginfo-lines", "build with line number debugger metadata") o("debuginfo-only-std", "rust.debuginfo-only-std", "build only libstd with debugging information") o("debuginfo-tools", 
"rust.debuginfo-tools", "build extended tools with debugging information") -o("debug-jemalloc", "rust.debug-jemalloc", "build jemalloc with --enable-debug --enable-fill") v("save-toolstates", "rust.save-toolstates", "save build and test status of external tools into this file") v("prefix", "install.prefix", "set installation prefix") @@ -99,7 +99,6 @@ v("llvm-root", None, "set LLVM root") v("llvm-config", None, "set path to llvm-config") v("llvm-filecheck", None, "set path to LLVM's FileCheck utility") v("python", "build.python", "set path to python") -v("jemalloc-root", None, "set directory where libjemalloc_pic.a is located") v("android-cross-path", "target.arm-linux-androideabi.android-ndk", "Android NDK standalone path (deprecated)") v("i686-linux-android-ndk", "target.i686-linux-android.android-ndk", @@ -148,7 +147,6 @@ v("default-linker", "rust.default-linker", "the default linker") # Many of these are saved below during the "writing configuration" step # (others are conditionally saved). o("manage-submodules", "build.submodules", "let the build manage the git submodules") -o("jemalloc", "rust.use-jemalloc", "build liballoc with jemalloc") o("full-bootstrap", "build.full-bootstrap", "build three compilers instead of two") o("extended", "build.extended", "build an extended rust tool set") @@ -330,8 +328,6 @@ for key in known_args: set('target.{}.llvm-config'.format(build()), value) elif option.name == 'llvm-filecheck': set('target.{}.llvm-filecheck'.format(build()), value) - elif option.name == 'jemalloc-root': - set('target.{}.jemalloc'.format(build()), value + '/libjemalloc_pic.a') elif option.name == 'tools': set('build.tools', value.split(',')) elif option.name == 'host': @@ -393,6 +389,13 @@ for target in configured_targets: targets[target][0] = targets[target][0].replace("x86_64-unknown-linux-gnu", target) +def is_number(value): + try: + float(value) + return True + except ValueError: + return False + # Here we walk through the constructed configuration we have from the parsed # command line arguments. 
We then apply each piece of configuration by # basically just doing a `sed` to change the various configuration line to what @@ -406,7 +409,11 @@ def to_toml(value): elif isinstance(value, list): return '[' + ', '.join(map(to_toml, value)) + ']' elif isinstance(value, str): - return "'" + value + "'" + # Don't put quotes around numeric values + if is_number(value): + return value + else: + return "'" + value + "'" else: raise RuntimeError('no toml') diff --git a/src/bootstrap/dist.rs b/src/bootstrap/dist.rs index fea6302d0a..cd8d5642b2 100644 --- a/src/bootstrap/dist.rs +++ b/src/bootstrap/dist.rs @@ -851,7 +851,7 @@ impl Step for Src { t!(fs::create_dir_all(&dst_src)); let src_files = [ - "src/Cargo.lock", + "Cargo.lock", ]; // This is the reduced set of paths which will become the rust-src component // (essentially libstd and all of its path dependencies) @@ -859,8 +859,6 @@ impl Step for Src { "src/build_helper", "src/dlmalloc", "src/liballoc", - "src/liballoc_jemalloc", - "src/liballoc_system", "src/libbacktrace", "src/libcompiler_builtins", "src/libcore", @@ -878,13 +876,12 @@ impl Step for Src { "src/rustc/dlmalloc_shim", "src/libtest", "src/libterm", - "src/jemalloc", "src/libprofiler_builtins", "src/stdsimd", + "src/libproc_macro", ]; let std_src_dirs_exclude = [ "src/libcompiler_builtins/compiler-rt/test", - "src/jemalloc/test/unit", ]; copy_src_dirs(builder, &std_src_dirs[..], &std_src_dirs_exclude[..], &dst_src); @@ -911,7 +908,7 @@ impl Step for Src { } } -const CARGO_VENDOR_VERSION: &str = "0.1.4"; +const CARGO_VENDOR_VERSION: &str = "0.1.19"; #[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)] pub struct PlainSourceTarball; @@ -952,6 +949,8 @@ impl Step for PlainSourceTarball { "configure", "x.py", "config.toml.example", + "Cargo.toml", + "Cargo.lock", ]; let src_dirs = [ "src", @@ -995,7 +994,7 @@ impl Step for PlainSourceTarball { // Vendor all Cargo dependencies let mut cmd = Command::new(&builder.initial_cargo); cmd.arg("vendor") - .current_dir(&plain_dst_src.join("src")); + .current_dir(&plain_dst_src); builder.run(&mut cmd); } diff --git a/src/bootstrap/doc.rs b/src/bootstrap/doc.rs index 85767936fc..201129b92d 100644 --- a/src/bootstrap/doc.rs +++ b/src/bootstrap/doc.rs @@ -410,14 +410,15 @@ impl Step for Standalone { cmd.arg("--html-after-content").arg(&footer) .arg("--html-before-content").arg(&version_info) .arg("--html-in-header").arg(&favicon) + .arg("--markdown-no-toc") + .arg("--index-page").arg(&builder.src.join("src/doc/index.md")) .arg("--markdown-playground-url") .arg("https://play.rust-lang.org/") .arg("-o").arg(&out) .arg(&path); if filename == "not_found.md" { - cmd.arg("--markdown-no-toc") - .arg("--markdown-css") + cmd.arg("--markdown-css") .arg("https://doc.rust-lang.org/rust.css"); } else { cmd.arg("--markdown-css").arg("rust.css"); @@ -485,23 +486,31 @@ impl Step for Std { // will also directly handle merging. let my_out = builder.crate_doc_out(target); t!(symlink_dir_force(&builder.config, &my_out, &out_dir)); + t!(fs::copy(builder.src.join("src/doc/rust.css"), out.join("rust.css"))); - let mut cargo = builder.cargo(compiler, Mode::Std, target, "doc"); - compile::std_cargo(builder, &compiler, target, &mut cargo); + let run_cargo_rustdoc_for = |package: &str| { + let mut cargo = builder.cargo(compiler, Mode::Std, target, "rustdoc"); + compile::std_cargo(builder, &compiler, target, &mut cargo); - // Keep a whitelist so we do not build internal stdlib crates, these will be - // build by the rustc step later if enabled. 
- cargo.arg("--no-deps"); - for krate in &["alloc", "core", "std"] { - cargo.arg("-p").arg(krate); + // Keep a whitelist so we do not build internal stdlib crates, these will be + // build by the rustc step later if enabled. + cargo.arg("-Z").arg("unstable-options") + .arg("-p").arg(package); // Create all crate output directories first to make sure rustdoc uses // relative links. // FIXME: Cargo should probably do this itself. - t!(fs::create_dir_all(out_dir.join(krate))); - } + t!(fs::create_dir_all(out_dir.join(package))); + cargo.arg("--") + .arg("--markdown-css").arg("rust.css") + .arg("--markdown-no-toc") + .arg("--index-page").arg(&builder.src.join("src/doc/index.md")); - builder.run(&mut cargo); - builder.cp_r(&my_out, &out); + builder.run(&mut cargo); + builder.cp_r(&my_out, &out); + }; + for krate in &["alloc", "core", "std"] { + run_cargo_rustdoc_for(krate); + } } } @@ -906,13 +915,13 @@ fn symlink_dir_force(config: &Config, src: &Path, dst: &Path) -> io::Result<()> } if let Ok(m) = fs::symlink_metadata(dst) { if m.file_type().is_dir() { - try!(fs::remove_dir_all(dst)); + fs::remove_dir_all(dst)?; } else { // handle directory junctions on windows by falling back to // `remove_dir`. - try!(fs::remove_file(dst).or_else(|_| { + fs::remove_file(dst).or_else(|_| { fs::remove_dir(dst) - })); + })?; } } diff --git a/src/bootstrap/flags.rs b/src/bootstrap/flags.rs index 2084b8bdb6..1211d485d1 100644 --- a/src/bootstrap/flags.rs +++ b/src/bootstrap/flags.rs @@ -93,8 +93,7 @@ impl Default for Subcommand { impl Flags { pub fn parse(args: &[String]) -> Flags { let mut extra_help = String::new(); - let mut subcommand_help = format!( - "\ + let mut subcommand_help = String::from("\ Usage: x.py [options] [...] Subcommands: @@ -365,8 +364,8 @@ Arguments: } let cmd = match subcommand.as_str() { - "build" => Subcommand::Build { paths: paths }, - "check" => Subcommand::Check { paths: paths }, + "build" => Subcommand::Build { paths }, + "check" => Subcommand::Check { paths }, "test" => Subcommand::Test { paths, bless: matches.opt_present("bless"), @@ -386,9 +385,9 @@ Arguments: paths, test_args: matches.opt_strs("test-args"), }, - "doc" => Subcommand::Doc { paths: paths }, + "doc" => Subcommand::Doc { paths }, "clean" => { - if paths.len() > 0 { + if !paths.is_empty() { println!("\nclean does not take a path argument\n"); usage(1, &opts, &subcommand_help, &extra_help); } @@ -413,11 +412,11 @@ Arguments: keep_stage: matches.opt_strs("keep-stage") .into_iter().map(|j| j.parse().unwrap()) .collect(), - host: split(matches.opt_strs("host")) + host: split(&matches.opt_strs("host")) .into_iter() .map(|x| INTERNER.intern_string(x)) .collect::>(), - target: split(matches.opt_strs("target")) + target: split(&matches.opt_strs("target")) .into_iter() .map(|x| INTERNER.intern_string(x)) .collect::>(), @@ -425,7 +424,7 @@ Arguments: jobs: matches.opt_str("jobs").map(|j| j.parse().unwrap()), cmd, incremental: matches.opt_present("incremental"), - exclude: split(matches.opt_strs("exclude")) + exclude: split(&matches.opt_strs("exclude")) .into_iter() .map(|p| p.into()) .collect::>(), @@ -488,7 +487,7 @@ impl Subcommand { } } -fn split(s: Vec) -> Vec { +fn split(s: &[String]) -> Vec { s.iter() .flat_map(|s| s.split(',')) .map(|s| s.to_string()) diff --git a/src/bootstrap/lib.rs b/src/bootstrap/lib.rs index ab3d0b5137..2832f5bebd 100644 --- a/src/bootstrap/lib.rs +++ b/src/bootstrap/lib.rs @@ -516,12 +516,6 @@ impl Build { fn std_features(&self) -> String { let mut features = "panic-unwind".to_string(); - if 
self.config.debug_jemalloc { - features.push_str(" debug-jemalloc"); - } - if self.config.use_jemalloc { - features.push_str(" jemalloc"); - } if self.config.backtrace { features.push_str(" backtrace"); } @@ -537,8 +531,8 @@ impl Build { /// Get the space-separated set of activated features for the compiler. fn rustc_features(&self) -> String { let mut features = String::new(); - if self.config.use_jemalloc { - features.push_str(" jemalloc"); + if self.config.jemalloc { + features.push_str("jemalloc"); } features } @@ -768,7 +762,7 @@ impl Build { let sha = self.rust_sha().unwrap_or(channel::CFG_RELEASE_NUM); format!("/rustc/{}", sha) } - GitRepo::Llvm => format!("/rustc/llvm"), + GitRepo::Llvm => String::from("/rustc/llvm"), }; Some(format!("{}={}", self.src.display(), path)) } @@ -786,12 +780,12 @@ impl Build { let mut base = self.cc[&target].args().iter() .map(|s| s.to_string_lossy().into_owned()) .filter(|s| !s.starts_with("-O") && !s.starts_with("/O")) - .collect::>(); + .collect::>(); // If we're compiling on macOS then we add a few unconditional flags // indicating that we want libc++ (more filled out than libstdc++) and // we want to compile for 10.7. This way we can ensure that - // LLVM/jemalloc/etc are all properly compiled. + // LLVM/etc are all properly compiled. if target.contains("apple-darwin") { base.push("-stdlib=libc++".into()); } @@ -806,10 +800,10 @@ impl Build { if let Some(map) = self.debuginfo_map(which) { let cc = self.cc(target); if cc.ends_with("clang") || cc.ends_with("gcc") { - base.push(format!("-fdebug-prefix-map={}", map).into()); + base.push(format!("-fdebug-prefix-map={}", map)); } else if cc.ends_with("clang-cl.exe") { base.push("-Xclang".into()); - base.push(format!("-fdebug-prefix-map={}", map).into()); + base.push(format!("-fdebug-prefix-map={}", map)); } } base @@ -843,7 +837,8 @@ impl Build { } else if target != self.config.build && !target.contains("msvc") && !target.contains("emscripten") && - !target.contains("wasm32") { + !target.contains("wasm32") && + !target.contains("fuchsia") { Some(self.cc(target)) } else { None diff --git a/src/bootstrap/native.rs b/src/bootstrap/native.rs index 30fb4a86b6..448967ef0c 100644 --- a/src/bootstrap/native.rs +++ b/src/bootstrap/native.rs @@ -353,7 +353,7 @@ fn configure_cmake(builder: &Builder, // definitely causes problems since all the env vars are pointing to // 32-bit libraries. // - // To hack aroudn this... again... we pass an argument that's + // To hack around this... again... we pass an argument that's // unconditionally passed in the sccache shim. This'll get CMake to // correctly diagnose it's doing a 32-bit compilation and LLVM will // internally configure itself appropriately. @@ -361,7 +361,7 @@ fn configure_cmake(builder: &Builder, cfg.env("SCCACHE_EXTRA_ARGS", "-m32"); } - // If ccache is configured we inform the build a little differently hwo + // If ccache is configured we inform the build a little differently how // to invoke ccache while also invoking our compilers. } else if let Some(ref ccache) = builder.config.ccache { cfg.define("CMAKE_C_COMPILER", ccache) diff --git a/src/bootstrap/sanity.rs b/src/bootstrap/sanity.rs index 724cb5841f..15d3bccba0 100644 --- a/src/bootstrap/sanity.rs +++ b/src/bootstrap/sanity.rs @@ -74,7 +74,7 @@ pub fn check(build: &mut Build) { // one is present as part of the PATH then that can lead to the system // being unable to identify the files properly. See // https://github.com/rust-lang/rust/issues/34959 for more details. 
- if cfg!(windows) && path.to_string_lossy().contains("\"") { + if cfg!(windows) && path.to_string_lossy().contains('\"') { panic!("PATH contains invalid character '\"'"); } @@ -152,12 +152,6 @@ pub fn check(build: &mut Build) { if !build.config.dry_run { cmd_finder.must_have(build.cxx(*host).unwrap()); } - - // The msvc hosts don't use jemalloc, turn it off globally to - // avoid packaging the dummy liballoc_jemalloc on that platform. - if host.contains("msvc") { - build.config.use_jemalloc = false; - } } // Externally configured LLVM requires FileCheck to exist diff --git a/src/bootstrap/test.rs b/src/bootstrap/test.rs index f6032eb993..da82735680 100644 --- a/src/bootstrap/test.rs +++ b/src/bootstrap/test.rs @@ -521,7 +521,7 @@ impl Step for RustdocTheme { fn make_run(run: RunConfig) { let compiler = run.builder.compiler(run.builder.top_stage, run.host); - run.builder.ensure(RustdocTheme { compiler: compiler }); + run.builder.ensure(RustdocTheme { compiler }); } fn run(self, builder: &Builder) { @@ -584,9 +584,9 @@ impl Step for RustdocJS { }); builder.run(&mut command); } else { - builder.info(&format!( + builder.info( "No nodejs found, skipping \"src/test/rustdoc-js\" tests" - )); + ); } } } @@ -653,7 +653,7 @@ impl Step for Tidy { } let _folder = builder.fold_output(|| "tidy"); - builder.info(&format!("tidy check")); + builder.info("tidy check"); try_run(builder, &mut cmd); } @@ -839,12 +839,6 @@ host_test!(CompileFailFullDeps { suite: "compile-fail-fulldeps" }); -host_test!(IncrementalFullDeps { - path: "src/test/incremental-fulldeps", - mode: "incremental", - suite: "incremental-fulldeps" -}); - host_test!(Rustdoc { path: "src/test/rustdoc", mode: "rustdoc", @@ -982,6 +976,11 @@ impl Step for Compiletest { builder.ensure(compile::Std { compiler, target: compiler.host }); } + // HACK(eddyb) ensure that `libproc_macro` is available on the host. + builder.ensure(compile::Test { compiler, target: compiler.host }); + // Also provide `rust_test_helpers` for the host. 
+ builder.ensure(native::TestHelpers { target: compiler.host }); + builder.ensure(native::TestHelpers { target }); builder.ensure(RemoteCopyLibs { compiler, target }); @@ -1023,7 +1022,13 @@ impl Step for Compiletest { cmd.arg("--bless"); } - let compare_mode = builder.config.cmd.compare_mode().or(self.compare_mode); + let compare_mode = builder.config.cmd.compare_mode().or_else(|| { + if builder.config.test_compare_mode { + self.compare_mode + } else { + None + } + }); if let Some(ref nodejs) = builder.config.nodejs { cmd.arg("--nodejs").arg(nodejs); @@ -1049,7 +1054,11 @@ impl Step for Compiletest { cmd.arg("--linker").arg(linker); } - let hostflags = flags.clone(); + let mut hostflags = flags.clone(); + hostflags.push(format!( + "-Lnative={}", + builder.test_helpers_out(compiler.host).display() + )); cmd.arg("--host-rustcflags").arg(hostflags.join(" ")); let mut targetflags = flags; @@ -1168,9 +1177,9 @@ impl Step for Compiletest { } } if suite == "run-make-fulldeps" && !builder.config.llvm_enabled { - builder.info(&format!( + builder.info( "Ignoring run-make test suite as they generally don't work without LLVM" - )); + ); return; } @@ -1504,8 +1513,7 @@ impl Step for CrateNotDefault { type Output = (); fn should_run(run: ShouldRun) -> ShouldRun { - run.path("src/liballoc_jemalloc") - .path("src/librustc_asan") + run.path("src/librustc_asan") .path("src/librustc_lsan") .path("src/librustc_msan") .path("src/librustc_tsan") @@ -1522,7 +1530,6 @@ impl Step for CrateNotDefault { target: run.target, test_kind, krate: match run.path { - _ if run.path.ends_with("src/liballoc_jemalloc") => "alloc_jemalloc", _ if run.path.ends_with("src/librustc_asan") => "rustc_asan", _ if run.path.ends_with("src/librustc_lsan") => "rustc_lsan", _ if run.path.ends_with("src/librustc_msan") => "rustc_msan", @@ -1561,7 +1568,6 @@ impl Step for Crate { run = run.krate("test"); for krate in run.builder.in_tree_crates("std") { if krate.is_local(&run.builder) - && !krate.name.contains("jemalloc") && !(krate.name.starts_with("rustc_") && krate.name.ends_with("san")) && krate.name != "dlmalloc" { @@ -1692,10 +1698,10 @@ impl Step for Crate { // The javascript shim implements the syscall interface so that test // output can be correctly reported. if !builder.config.wasm_syscall { - builder.info(&format!( + builder.info( "Libstd was built without `wasm_syscall` feature enabled: \ test output may not be visible." 
- )); + ); } // On the wasm32-unknown-unknown target we're using LTO which is @@ -1891,7 +1897,7 @@ impl Step for Distcheck { /// Run "distcheck", a 'make check' from a tarball fn run(self, builder: &Builder) { - builder.info(&format!("Distcheck")); + builder.info("Distcheck"); let dir = builder.out.join("tmp").join("distcheck"); let _ = fs::remove_dir_all(&dir); t!(fs::create_dir_all(&dir)); @@ -1919,7 +1925,7 @@ impl Step for Distcheck { ); // Now make sure that rust-src has all of libstd's dependencies - builder.info(&format!("Distcheck rust-src")); + builder.info("Distcheck rust-src"); let dir = builder.out.join("tmp").join("distcheck-src"); let _ = fs::remove_dir_all(&dir); t!(fs::create_dir_all(&dir)); diff --git a/src/bootstrap/tool.rs b/src/bootstrap/tool.rs index 6868a063ce..58c5296beb 100644 --- a/src/bootstrap/tool.rs +++ b/src/bootstrap/tool.rs @@ -149,7 +149,7 @@ impl Step for ToolBuild { } }); - if is_expected && duplicates.len() != 0 { + if is_expected && !duplicates.is_empty() { println!("duplicate artfacts found when compiling a tool, this \ typically means that something was recompiled because \ a transitive dependency has different features activated \ @@ -171,7 +171,7 @@ impl Step for ToolBuild { println!(" `{}` additionally enabled features {:?} at {:?}", prev.0, &prev_features - &cur_features, prev.1); } - println!(""); + println!(); println!("to fix this you will probably want to edit the local \ src/tools/rustc-workspace-hack/Cargo.toml crate, as \ that will update the dependency graph to ensure that \ @@ -189,7 +189,7 @@ impl Step for ToolBuild { if !is_optional_tool { exit(1); } else { - return None; + None } } else { let cargo_out = builder.cargo_out(compiler, self.mode, target) @@ -253,15 +253,19 @@ pub fn prepare_tool_cargo( if let Some(date) = info.commit_date() { cargo.env("CFG_COMMIT_DATE", date); } - if features.len() > 0 { + if !features.is_empty() { cargo.arg("--features").arg(&features.join(", ")); } cargo } macro_rules! tool { - ($($name:ident, $path:expr, $tool_name:expr, $mode:expr - $(,llvm_tools = $llvm:expr)* $(,is_external_tool = $external:expr)*;)+) => { + ($( + $name:ident, $path:expr, $tool_name:expr, $mode:expr + $(,llvm_tools = $llvm:expr)* + $(,is_external_tool = $external:expr)* + ; + )+) => { #[derive(Copy, PartialEq, Eq, Clone)] pub enum Tool { $( diff --git a/src/bootstrap/util.rs b/src/bootstrap/util.rs index 8ce8f20add..be24ae0ce6 100644 --- a/src/bootstrap/util.rs +++ b/src/bootstrap/util.rs @@ -203,11 +203,11 @@ pub fn symlink_dir(config: &Config, src: &Path, dest: &Path) -> io::Result<()> { // We're using low-level APIs to create the junction, and these are more // picky about paths. For example, forward slashes cannot be used as a // path separator, so we should try to canonicalize the path first. 
- let target = try!(fs::canonicalize(target)); + let target = fs::canonicalize(target)?; - try!(fs::create_dir(junction)); + fs::create_dir(junction)?; - let path = try!(to_u16s(junction)); + let path = to_u16s(junction)?; unsafe { let h = CreateFileW(path.as_ptr(), diff --git a/src/ci/docker/asmjs/Dockerfile b/src/ci/docker/asmjs/Dockerfile index cb85cf3d9e..9eaffbf83e 100644 --- a/src/ci/docker/asmjs/Dockerfile +++ b/src/ci/docker/asmjs/Dockerfile @@ -20,11 +20,11 @@ COPY scripts/sccache.sh /scripts/ RUN sh /scripts/sccache.sh ENV PATH=$PATH:/emsdk-portable -ENV PATH=$PATH:/emsdk-portable/clang/e1.37.13_64bit/ -ENV PATH=$PATH:/emsdk-portable/emscripten/1.37.13/ -ENV PATH=$PATH:/emsdk-portable/node/4.1.1_64bit/bin/ -ENV EMSCRIPTEN=/emsdk-portable/emscripten/1.37.13/ -ENV BINARYEN_ROOT=/emsdk-portable/clang/e1.37.13_64bit/binaryen/ +ENV PATH=$PATH:/emsdk-portable/clang/e1.38.15_64bit/ +ENV PATH=$PATH:/emsdk-portable/emscripten/1.38.15/ +ENV PATH=$PATH:/emsdk-portable/node/8.9.1_64bit/bin/ +ENV EMSCRIPTEN=/emsdk-portable/emscripten/1.38.15/ +ENV BINARYEN_ROOT=/emsdk-portable/clang/e1.38.15_64bit/binaryen/ ENV EM_CONFIG=/emsdk-portable/.emscripten ENV TARGETS=asmjs-unknown-emscripten diff --git a/src/ci/docker/disabled/dist-powerpcspe-linux/Dockerfile b/src/ci/docker/disabled/dist-powerpcspe-linux/Dockerfile new file mode 100644 index 0000000000..3227819dad --- /dev/null +++ b/src/ci/docker/disabled/dist-powerpcspe-linux/Dockerfile @@ -0,0 +1,26 @@ +FROM ubuntu:16.04 + +RUN apt-get update && apt-get install -y --no-install-recommends \ + g++ \ + make \ + file \ + curl \ + ca-certificates \ + python2.7 \ + git \ + cmake \ + sudo \ + gdb \ + xz-utils \ + g++-powerpc-linux-gnuspe \ + libssl-dev \ + pkg-config + + +COPY scripts/sccache.sh /scripts/ +RUN sh /scripts/sccache.sh + +ENV HOSTS=powerpc-unknown-linux-gnuspe + +ENV RUST_CONFIGURE_ARGS --enable-extended --disable-docs +ENV SCRIPT python2.7 ../x.py dist --host $HOSTS --target $HOSTS diff --git a/src/ci/docker/disabled/wasm32/Dockerfile b/src/ci/docker/disabled/wasm32/Dockerfile index 6ac90d1745..0d2bd39303 100644 --- a/src/ci/docker/disabled/wasm32/Dockerfile +++ b/src/ci/docker/disabled/wasm32/Dockerfile @@ -21,11 +21,11 @@ COPY scripts/sccache.sh /scripts/ RUN sh /scripts/sccache.sh ENV PATH=$PATH:/emsdk-portable -ENV PATH=$PATH:/emsdk-portable/clang/e1.37.13_64bit/ -ENV PATH=$PATH:/emsdk-portable/emscripten/1.37.13/ -ENV PATH=$PATH:/node-v8.0.0-linux-x64/bin/ -ENV EMSCRIPTEN=/emsdk-portable/emscripten/1.37.13/ -ENV BINARYEN_ROOT=/emsdk-portable/clang/e1.37.13_64bit/binaryen/ +ENV PATH=$PATH:/emsdk-portable/clang/e1.38.15_64bit/ +ENV PATH=$PATH:/emsdk-portable/emscripten/1.38.15/ +ENV PATH=$PATH:/emsdk-portable/node/8.9.1_64bit/bin/ +ENV EMSCRIPTEN=/emsdk-portable/emscripten/1.38.15/ +ENV BINARYEN_ROOT=/emsdk-portable/clang/e1.38.15_64bit/binaryen/ ENV EM_CONFIG=/emsdk-portable/.emscripten ENV TARGETS=wasm32-unknown-emscripten diff --git a/src/ci/docker/dist-i686-linux/Dockerfile b/src/ci/docker/dist-i686-linux/Dockerfile index d99e409e42..b087ea7899 100644 --- a/src/ci/docker/dist-i686-linux/Dockerfile +++ b/src/ci/docker/dist-i686-linux/Dockerfile @@ -67,7 +67,7 @@ RUN ./build-gcc.sh COPY dist-x86_64-linux/build-python.sh /tmp/ RUN ./build-python.sh -# Now build LLVM+Clang 6, afterwards configuring further compilations to use the +# Now build LLVM+Clang 7, afterwards configuring further compilations to use the # clang/clang++ compilers. 
COPY dist-x86_64-linux/build-clang.sh /tmp/ RUN ./build-clang.sh @@ -98,7 +98,8 @@ ENV RUST_CONFIGURE_ARGS \ --enable-sanitizers \ --enable-profiler \ --set target.i686-unknown-linux-gnu.linker=clang \ - --build=i686-unknown-linux-gnu + --build=i686-unknown-linux-gnu \ + --set rust.jemalloc ENV SCRIPT python2.7 ../x.py dist --build $HOSTS --host $HOSTS --target $HOSTS ENV CARGO_TARGET_I686_UNKNOWN_LINUX_GNU_LINKER=clang diff --git a/src/ci/docker/dist-various-1/Dockerfile b/src/ci/docker/dist-various-1/Dockerfile index e2484b7224..c7e6af28f9 100644 --- a/src/ci/docker/dist-various-1/Dockerfile +++ b/src/ci/docker/dist-various-1/Dockerfile @@ -22,7 +22,8 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ libssl-dev \ pkg-config \ gcc-arm-none-eabi \ - libnewlib-arm-none-eabi + libnewlib-arm-none-eabi \ + qemu-system-arm WORKDIR /build diff --git a/src/ci/docker/dist-various-2/Dockerfile b/src/ci/docker/dist-various-2/Dockerfile index 7adb32efa1..944c2a51b8 100644 --- a/src/ci/docker/dist-various-2/Dockerfile +++ b/src/ci/docker/dist-various-2/Dockerfile @@ -47,6 +47,17 @@ ENV \ CC_x86_64_sun_solaris=x86_64-sun-solaris2.10-gcc \ CXX_x86_64_sun_solaris=x86_64-sun-solaris2.10-g++ +ENV CARGO_TARGET_X86_64_FUCHSIA_AR /usr/local/bin/llvm-ar +ENV CARGO_TARGET_X86_64_FUCHSIA_RUSTFLAGS \ +-C link-arg=--sysroot=/usr/local/x86_64-fuchsia \ +-C link-arg=-L/usr/local/x86_64-fuchsia/lib \ +-C link-arg=-L/usr/local/lib/x86_64-fuchsia/lib +ENV CARGO_TARGET_AARCH64_FUCHSIA_AR /usr/local/bin/llvm-ar +ENV CARGO_TARGET_AARCH64_FUCHSIA_RUSTFLAGS \ +-C link-arg=--sysroot=/usr/local/aarch64-fuchsia \ +-C link-arg=-L/usr/local/aarch64-fuchsia/lib \ +-C link-arg=-L/usr/local/lib/aarch64-fuchsia/lib + ENV TARGETS=x86_64-fuchsia ENV TARGETS=$TARGETS,aarch64-fuchsia ENV TARGETS=$TARGETS,sparcv9-sun-solaris @@ -55,5 +66,5 @@ ENV TARGETS=$TARGETS,x86_64-sun-solaris ENV TARGETS=$TARGETS,x86_64-unknown-linux-gnux32 ENV TARGETS=$TARGETS,x86_64-unknown-cloudabi -ENV RUST_CONFIGURE_ARGS --enable-extended --disable-docs +ENV RUST_CONFIGURE_ARGS --enable-extended --enable-lld --disable-docs ENV SCRIPT python2.7 ../x.py dist --target $TARGETS diff --git a/src/ci/docker/dist-x86_64-linux/Dockerfile b/src/ci/docker/dist-x86_64-linux/Dockerfile index 8696f72e0e..a1a778c2b2 100644 --- a/src/ci/docker/dist-x86_64-linux/Dockerfile +++ b/src/ci/docker/dist-x86_64-linux/Dockerfile @@ -67,7 +67,7 @@ RUN ./build-gcc.sh COPY dist-x86_64-linux/build-python.sh /tmp/ RUN ./build-python.sh -# Now build LLVM+Clang 6, afterwards configuring further compilations to use the +# Now build LLVM+Clang 7, afterwards configuring further compilations to use the # clang/clang++ compilers. 
COPY dist-x86_64-linux/build-clang.sh /tmp/ RUN ./build-clang.sh @@ -101,7 +101,8 @@ ENV RUST_CONFIGURE_ARGS \ --set target.x86_64-unknown-linux-gnu.linker=clang \ --set target.x86_64-unknown-linux-gnu.ar=/rustroot/bin/llvm-ar \ --set target.x86_64-unknown-linux-gnu.ranlib=/rustroot/bin/llvm-ranlib \ - --set llvm.thin-lto=true + --set llvm.thin-lto=true \ + --set rust.jemalloc ENV SCRIPT python2.7 ../x.py dist --host $HOSTS --target $HOSTS ENV CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_LINKER=clang diff --git a/src/ci/docker/dist-x86_64-linux/build-clang.sh b/src/ci/docker/dist-x86_64-linux/build-clang.sh index 4595eacb31..2762f0bf7e 100755 --- a/src/ci/docker/dist-x86_64-linux/build-clang.sh +++ b/src/ci/docker/dist-x86_64-linux/build-clang.sh @@ -13,7 +13,7 @@ set -ex source shared.sh -LLVM=6.0.0 +LLVM=7.0.0 mkdir clang cd clang diff --git a/src/ci/docker/scripts/emscripten.sh b/src/ci/docker/scripts/emscripten.sh index d32ed6b461..1d7b33db9e 100644 --- a/src/ci/docker/scripts/emscripten.sh +++ b/src/ci/docker/scripts/emscripten.sh @@ -33,8 +33,8 @@ curl -fL https://s3.amazonaws.com/mozilla-games/emscripten/releases/emsdk-portab cd /emsdk-portable ./emsdk update -hide_output ./emsdk install sdk-1.37.13-64bit -./emsdk activate sdk-1.37.13-64bit +hide_output ./emsdk install sdk-1.38.15-64bit +./emsdk activate sdk-1.38.15-64bit # Compile and cache libc source ./emsdk_env.sh @@ -46,8 +46,3 @@ rm -f a.* # Make emsdk usable by any user cp /root/.emscripten /emsdk-portable chmod a+rxw -R /emsdk-portable - -# node 8 is required to run wasm -cd / -curl -sL https://nodejs.org/dist/v8.0.0/node-v8.0.0-linux-x64.tar.xz | \ - tar -xJ diff --git a/src/ci/docker/scripts/musl.sh b/src/ci/docker/scripts/musl.sh index fcebfb9324..11d85471b7 100644 --- a/src/ci/docker/scripts/musl.sh +++ b/src/ci/docker/scripts/musl.sh @@ -51,7 +51,7 @@ hide_output make clean cd .. -LLVM=60 +LLVM=70 # may have been downloaded in a previous run if [ ! 
-d libunwind-release_$LLVM ]; then diff --git a/src/ci/docker/wasm32-unknown/Dockerfile b/src/ci/docker/wasm32-unknown/Dockerfile index f2a29b0315..161f0c0062 100644 --- a/src/ci/docker/wasm32-unknown/Dockerfile +++ b/src/ci/docker/wasm32-unknown/Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:16.04 +FROM ubuntu:18.04 RUN apt-get update && apt-get install -y --no-install-recommends \ g++ \ diff --git a/src/ci/docker/x86_64-gnu-nopt/Dockerfile b/src/ci/docker/x86_64-gnu-nopt/Dockerfile index d2b0dd13dc..b0780fdf32 100644 --- a/src/ci/docker/x86_64-gnu-nopt/Dockerfile +++ b/src/ci/docker/x86_64-gnu-nopt/Dockerfile @@ -16,5 +16,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ COPY scripts/sccache.sh /scripts/ RUN sh /scripts/sccache.sh -ENV RUST_CONFIGURE_ARGS --build=x86_64-unknown-linux-gnu --disable-optimize-tests +ENV RUST_CONFIGURE_ARGS --build=x86_64-unknown-linux-gnu \ + --disable-optimize-tests \ + --set rust.test-compare-mode ENV RUST_CHECK_TARGET check diff --git a/src/ci/docker/x86_64-gnu/Dockerfile b/src/ci/docker/x86_64-gnu/Dockerfile index 444a8fe5da..dd94f2652b 100644 --- a/src/ci/docker/x86_64-gnu/Dockerfile +++ b/src/ci/docker/x86_64-gnu/Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:16.04 +FROM ubuntu:18.10 RUN apt-get update && apt-get install -y --no-install-recommends \ g++ \ diff --git a/src/ci/init_repo.sh b/src/ci/init_repo.sh index f2664e6d19..8345ab3bc3 100755 --- a/src/ci/init_repo.sh +++ b/src/ci/init_repo.sh @@ -55,6 +55,7 @@ function fetch_submodule { } included="src/llvm src/llvm-emscripten src/doc/book src/doc/rust-by-example" +included="$included src/tools/lld src/tools/clang src/tools/lldb" modules="$(git config --file .gitmodules --get-regexp '\.path$' | cut -d' ' -f2)" modules=($modules) use_git="" diff --git a/src/ci/run.sh b/src/ci/run.sh index 324c873dd3..2de80456cd 100755 --- a/src/ci/run.sh +++ b/src/ci/run.sh @@ -40,6 +40,7 @@ RUST_CONFIGURE_ARGS="$RUST_CONFIGURE_ARGS --enable-sccache" RUST_CONFIGURE_ARGS="$RUST_CONFIGURE_ARGS --disable-manage-submodules" RUST_CONFIGURE_ARGS="$RUST_CONFIGURE_ARGS --enable-locked-deps" RUST_CONFIGURE_ARGS="$RUST_CONFIGURE_ARGS --enable-cargo-native-static" +RUST_CONFIGURE_ARGS="$RUST_CONFIGURE_ARGS --set rust.codegen-units-std=1" if [ "$DIST_SRC" = "" ]; then RUST_CONFIGURE_ARGS="$RUST_CONFIGURE_ARGS --disable-dist-src" @@ -51,7 +52,7 @@ fi # # FIXME: need a scheme for changing this `nightly` value to `beta` and `stable` # either automatically or manually. -export RUST_RELEASE_CHANNEL=stable +export RUST_RELEASE_CHANNEL=beta if [ "$DEPLOY$DEPLOY_ALT" != "" ]; then RUST_CONFIGURE_ARGS="$RUST_CONFIGURE_ARGS --release-channel=$RUST_RELEASE_CHANNEL" RUST_CONFIGURE_ARGS="$RUST_CONFIGURE_ARGS --enable-llvm-static-stdcpp" diff --git a/src/doc/index.md b/src/doc/index.md index 33ee76739c..b79a349a45 100644 --- a/src/doc/index.md +++ b/src/doc/index.md @@ -21,6 +21,9 @@ nav { #search-but:hover, #search-input:focus { border-color: #55a9ff; } +h2 { + font-size: 18px; +} Welcome to an overview of the documentation provided by the Rust project. 
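The `--set rust.codegen-units-std=1` line added to `src/ci/run.sh` above depends on the `is_number` helper introduced in `src/bootstrap/configure.py` earlier in this patch: numeric `--set` values are now written into the generated `config.toml` unquoted. A sketch of the generated output (illustrative only, not part of the diff):

```toml
[rust]
# Previously configure.py quoted every string value, producing a TOML string:
#   codegen-units-std = '1'
# With the is_number() check it now emits a bare number, which matches the
# numeric codegen_units_std option added to config.rs:
codegen-units-std = 1
```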
diff --git a/src/doc/nomicon/src/vec-final.md b/src/doc/nomicon/src/vec-final.md index d8aec0f24a..66c8e8bcba 100644 --- a/src/doc/nomicon/src/vec-final.md +++ b/src/doc/nomicon/src/vec-final.md @@ -3,6 +3,7 @@ ```rust #![feature(ptr_internals)] #![feature(allocator_api)] +#![feature(alloc_layout_extra)] use std::ptr::{Unique, NonNull, self}; use std::mem; diff --git a/src/doc/rustc-guide/.editorconfig b/src/doc/rustc-guide/.editorconfig new file mode 100644 index 0000000000..cb3312517e --- /dev/null +++ b/src/doc/rustc-guide/.editorconfig @@ -0,0 +1,9 @@ +root = true + +[src/*] +end_of_line = lf +insert_final_newline = true + +[ci/*.sh] +indent_style = space +indent_size = 2 diff --git a/src/doc/rustc-guide/.travis.yml b/src/doc/rustc-guide/.travis.yml new file mode 100644 index 0000000000..55ea205876 --- /dev/null +++ b/src/doc/rustc-guide/.travis.yml @@ -0,0 +1,25 @@ +language: rust +cache: +- cargo +before_install: +- shopt -s globstar +- MAX_LINE_LENGTH=100 bash ci/check_line_lengths.sh src/**/*.md +install: +- source ~/.cargo/env || true +- bash ci/install.sh +script: +- mdbook build +- mdbook test +notifications: + email: + on_success: never +env: + global: + secure: YQX/AWq5KsvAFYqcCK6c1DmOZX9EMrecBM5qnc4uE2HvEBS+x0l8xatI2Nv8U9eiasZYfsqmHn0ANvxu6e4oqL15m4cVsdliCzdkrPsDapxTnwwJvMQg+yHZiEd5BPlaDQt/wYvP8QBXgQsXoAJKrfAS+BFsowBFHt/LOFOunbAQrtQZqwqrnI6+xh+2TRMckws/VcTLRqwl3pyEyfacJhbbv1V3gJh7Y17hELsgsP7+7cMXT0bK6dtf7a9vne9Hsm5fw7VeMKBn1/dJ82fyEK6HHjkjdw1/OoY35YVyNZ/9ZxP2u1ClEXzCRJQ2CvKr8Tuoh/AuoL0pwrfhOTaOuWU0QZT4QBqjTimsgBLqiJicMiSndgsXinLWvlDqrMS1XfleqCKqAQy9AJTCR1LnwR90/HRxfE5YDAL/mbc0Su4jj+l5Zv3UE8vUqFE34E/jzip17JkDT5aMkl4bgW65lqJE7SLWl7gXT7eYbPEtQZoucR1hkSsBu/4YTvcxSlD98spWZ68mWwYyjLJSQDES+GefUnHJ/RbBVl9pW+sL7jXJ+kZ/NBCtCIgrkGchudEMDEvS6rcOzwCejxqL1of0jYHGopkBXSVHOPneWIdNeKXwBZA9hp0yKh0sWwrKHrA3wYhS/kF9uO19l/RnSTXAfApYR/yJUbYliuMJYCgNeKE= +deploy: + provider: pages + skip-cleanup: true + github-token: $GITHUB_TOKEN + local-dir: book/html + on: + branch: master diff --git a/src/doc/rustc-guide/CODE_OF_CONDUCT.md b/src/doc/rustc-guide/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..89a9cdfcce --- /dev/null +++ b/src/doc/rustc-guide/CODE_OF_CONDUCT.md @@ -0,0 +1,38 @@ +# The Rust Code of Conduct + +A version of this document [can be found online](https://www.rust-lang.org/conduct.html). + +## Conduct + +**Contact**: [rust-mods@rust-lang.org](mailto:rust-mods@rust-lang.org) + +* We are committed to providing a friendly, safe and welcoming environment for all, regardless of level of experience, gender identity and expression, sexual orientation, disability, personal appearance, body size, race, ethnicity, age, religion, nationality, or other similar characteristic. +* On IRC, please avoid using overtly sexual nicknames or other nicknames that might detract from a friendly, safe and welcoming environment for all. +* Please be kind and courteous. There's no need to be mean or rude. +* Respect that people have differences of opinion and that every design or implementation choice carries a trade-off and numerous costs. There is seldom a right answer. +* Please keep unstructured critique to a minimum. If you have solid ideas you want to experiment with, make a fork and see how it works. +* We will exclude you from interaction if you insult, demean or harass anyone. That is not welcome behaviour. 
We interpret the term "harassment" as including the definition in the Citizen Code of Conduct; if you have any lack of clarity about what might be included in that concept, please read their definition. In particular, we don't tolerate behavior that excludes people in socially marginalized groups. +* Private harassment is also unacceptable. No matter who you are, if you feel you have been or are being harassed or made uncomfortable by a community member, please contact one of the channel ops or any of the [Rust moderation team](/team.html#Moderation) immediately. Whether you're a regular contributor or a newcomer, we care about making this community a safe place for you and we've got your back. +* Likewise any spamming, trolling, flaming, baiting or other attention-stealing behaviour is not welcome. + +## Moderation + + +These are the policies for upholding our community's standards of conduct. If you feel that a thread needs moderation, please contact the [Rust moderation team](/team.html#Moderation). + +1. Remarks that violate the Rust standards of conduct, including hateful, hurtful, oppressive, or exclusionary remarks, are not allowed. (Cursing is allowed, but never targeting another user, and never in a hateful manner.) +2. Remarks that moderators find inappropriate, whether listed in the code of conduct or not, are also not allowed. +3. Moderators will first respond to such remarks with a warning. +4. If the warning is unheeded, the user will be "kicked," i.e. kicked out of the communication channel to cool off. +5. If the user comes back and continues to make trouble, they will be banned, i.e. indefinitely excluded. +6. Moderators may choose at their discretion to un-ban the user if it was a first offense and they offer the offended party a genuine apology. +7. If a moderator bans someone and you think it was unjustified, please take it up with that moderator, or with a different moderator, **in private**. Complaints about bans in-channel are not allowed. +8. Moderators are held to a higher standard than other community members. If a moderator creates an inappropriate situation, they should expect less leeway than others. + +In the Rust community we strive to go the extra step to look out for each other. Don't just aim to be technically unimpeachable, try to be your best self. In particular, avoid flirting with offensive or sensitive issues, particularly if they're off-topic; this all too often leads to unnecessary fights, hurt feelings, and damaged trust; worse, it can drive people away from the community entirely. + +And if someone takes issue with something you said or did, resist the urge to be defensive. Just stop doing what it was they complained about and apologize. Even if you feel you were misinterpreted or unfairly accused, chances are good there was something you could've communicated better — remember that it's your responsibility to make your fellow Rustaceans comfortable. Everyone wants to get along and we are all here first and foremost because we want to talk about cool technology. You will find that people will be eager to assume good intent and forgive as long as you earn their trust. 
+ +The enforcement policies listed above apply to all official Rust venues; including official IRC channels (#rust, #rust-internals, #rust-tools, #rust-libs, #rustc, #rust-beginners, #rust-docs, #rust-community, #rust-lang, and #cargo); GitHub repositories under rust-lang, rust-lang-nursery, and rust-lang-deprecated; and all forums under rust-lang.org (users.rust-lang.org, internals.rust-lang.org). For other projects adopting the Rust Code of Conduct, please contact the maintainers of those projects for enforcement. If you wish to use this code of conduct for your own project, consider explicitly mentioning your moderation policy or making a copy with your own moderation policy so as to avoid confusion. + +*Adapted from the [Node.js Policy on Trolling](http://blog.izs.me/post/30036893703/policy-on-trolling) as well as the [Contributor Covenant v1.3.0](http://contributor-covenant.org/version/1/3/0/).* diff --git a/src/vendor/arrayvec/LICENSE-APACHE b/src/doc/rustc-guide/LICENSE-APACHE similarity index 100% rename from src/vendor/arrayvec/LICENSE-APACHE rename to src/doc/rustc-guide/LICENSE-APACHE diff --git a/src/vendor/datafrog/LICENSE-MIT b/src/doc/rustc-guide/LICENSE-MIT similarity index 100% rename from src/vendor/datafrog/LICENSE-MIT rename to src/doc/rustc-guide/LICENSE-MIT diff --git a/src/doc/rustc-guide/README.md b/src/doc/rustc-guide/README.md new file mode 100644 index 0000000000..45c7503fec --- /dev/null +++ b/src/doc/rustc-guide/README.md @@ -0,0 +1,43 @@ +This is a collaborative effort to build a guide that explains how rustc +works. The aim of the guide is to help new contributors get oriented +to rustc, as well as to help more experienced folks in figuring out +some new part of the compiler that they haven't worked on before. + +[You can read the latest version of the guide here.](https://rust-lang-nursery.github.io/rustc-guide/) + +You may also find the rustdocs [for the compiler itself][rustdocs] useful. + +[rustdocs]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc/ + +The guide can be useful today, but it has a lot of work still to go. + +### Contributing to the guide + +If you'd like to help improve the guide, we'd love to have you! You can find +plenty of issues on the [issue +tracker](https://github.com/rust-lang-nursery/rustc-guide/issue). Just post a +comment on the issue you would like to work on to make sure that we don't +accidentally duplicate work. If you think something is missing, please open an +issue about it! + +**In general, if you don't know how the compiler works, that is not a +problem!** In that case, what we will do is schedule a bit of time +for you to talk with someone who **does** know the code, or who wants +to pair with you and figure it out. Then you can work on writing up +what you learned. + +In general, when writing about a particular part of the compiler's code, we +recommend that you link to the relevant parts of the [rustc +rustdocs][rustdocs]. + +To help prevent accidentally introducing broken links, we use +`mdbook-linkcheck`. If installed on your machine, `mdbook` will automatically +invoke this link checker, otherwise it will emit a warning saying it couldn't +be found. + +```bash +> cargo install mdbook-linkcheck +``` + +You will need `mdbook` version `>= 0.2`. `linkcheck` will be run automatically +when you run `mdbook build`. 
diff --git a/src/doc/rustc-guide/book.toml b/src/doc/rustc-guide/book.toml new file mode 100644 index 0000000000..b9092a969c --- /dev/null +++ b/src/doc/rustc-guide/book.toml @@ -0,0 +1,12 @@ +[book] +title = "Guide to Rustc Development" +author = "Rustc developers" +description = "A guide to developing rustc" + +[output.html] + +[output.html.search] + +[output.linkcheck] +follow-web-links = true +exclude = [ "crates\\.io", "gcc\\.godbolt\\.org" ] diff --git a/src/doc/rustc-guide/ci/check_line_lengths.sh b/src/doc/rustc-guide/ci/check_line_lengths.sh new file mode 100755 index 0000000000..32bf739e4d --- /dev/null +++ b/src/doc/rustc-guide/ci/check_line_lengths.sh @@ -0,0 +1,43 @@ +#!/bin/bash + +if [ "$1" == "--help" ]; then + echo 'Usage:' + echo ' MAX_LINE_LENGTH=100' "$0" 'src/**/*.md' + exit 1 +fi + +if [ "$MAX_LINE_LENGTH" == "" ]; then + echo '`MAX_LINE_LENGTH` environment variable not set. Try --help.' + exit 1 +fi + +if [ "$1" == "" ]; then + echo 'No files provided.' + exit 1 +fi + +echo "Checking line lengths in all source files <= $MAX_LINE_LENGTH chars..." + +echo "Offending files and lines:" +(( bad_lines = 0 )) +(( inside_block = 0 )) +for file in "$@" ; do + echo "$file" + (( line_no = 0 )) + while IFS="" read -r line || [[ -n "$line" ]] ; do + (( line_no++ )) + if [[ "$line" =~ ^'```' ]] ; then + (( inside_block = !$inside_block )) + continue + fi + if ! (( $inside_block )) \ + && ! [[ "$line" =~ " | "|"-|-"|"://"|"]:"|\[\^[^\ ]+\]: ]] \ + && (( "${#line}" > $MAX_LINE_LENGTH )) ; then + (( bad_lines++ )) + echo -e "\t$line_no : $line" + fi + done < "$file" +done + +echo "$bad_lines offending lines found." +(( $bad_lines == 0 )) diff --git a/src/doc/rustc-guide/ci/install.sh b/src/doc/rustc-guide/ci/install.sh new file mode 100755 index 0000000000..947d751b48 --- /dev/null +++ b/src/doc/rustc-guide/ci/install.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +set -ex + +function cargo_install() { + local name=$1 + local version=$2 + + if command -v $name >/dev/null 2>&1; then + local installed_version=`$name --version | sed -E 's/[a-zA-Z_-]+ v?//g'` + if [ "$installed_version" == "$version" ]; then + echo "$name $version is already installed at $(command -v $name)" + else + echo "Forcing install $name $version" + cargo install $name --version $version --force + fi + else + echo "Installing $name $version" + cargo install $name --version $version + fi +} + +cargo_install mdbook 0.2.2 +cargo_install mdbook-linkcheck 0.2.3 diff --git a/src/doc/rustc-guide/src/SUMMARY.md b/src/doc/rustc-guide/src/SUMMARY.md new file mode 100644 index 0000000000..efe963c3fc --- /dev/null +++ b/src/doc/rustc-guide/src/SUMMARY.md @@ -0,0 +1,75 @@ +# Summary + +- [About this guide](./about-this-guide.md) +- [About the compiler team](./compiler-team.md) +- [How to build the compiler and run what you built](./how-to-build-and-run.md) + - [Build and Install distribution artifacts](./build-install-distribution-artifacts.md) + - [Documenting Compiler](./compiler-documenting.md) +- [Coding conventions](./conventions.md) +- [Walkthrough: a typical contribution](./walkthrough.md) +- [The compiler testing framework](./tests/intro.md) + - [Running tests](./tests/running.md) + - [Adding new tests](./tests/adding.md) + - [Using `compiletest` + commands to control test + execution](./compiletest.md) +- [Debugging the Compiler](./compiler-debugging.md) +- [Profiling the compiler](./profiling.md) + - [with the linux perf tool](./profiling/with_perf.md) +- [High-level overview of the compiler source](./high-level-overview.md) +- 
[The Rustc Driver](./rustc-driver.md) + - [Rustdoc](./rustdoc.md) +- [Queries: demand-driven compilation](./query.md) + - [Incremental compilation](./incremental-compilation.md) + - [Debugging and Testing](./incrcomp-debugging.md) +- [The parser](./the-parser.md) +- [`#[test]` Implementation](./test-implementation.md) +- [Macro expansion](./macro-expansion.md) +- [Name resolution](./name-resolution.md) +- [The HIR (High-level IR)](./hir.md) + - [Lowering AST to HIR](./lowering.md) +- [The `ty` module: representing types](./ty.md) +- [Type inference](./type-inference.md) +- [Trait solving (old-style)](./traits/resolution.md) + - [Higher-ranked trait bounds](./traits/hrtb.md) + - [Caching subtleties](./traits/caching.md) + - [Specialization](./traits/specialization.md) +- [Trait solving (new-style)](./traits/index.md) + - [Lowering to logic](./traits/lowering-to-logic.md) + - [Goals and clauses](./traits/goals-and-clauses.md) + - [Equality and associated types](./traits/associated-types.md) + - [Implied bounds](./traits/implied-bounds.md) + - [Region constraints](./traits/regions.md) + - [The lowering module in rustc](./traits/lowering-module.md) + - [Lowering rules](./traits/lowering-rules.md) + - [Well-formedness checking](./traits/wf.md) + - [Canonical queries](./traits/canonical-queries.md) + - [Canonicalization](./traits/canonicalization.md) + - [The SLG solver](./traits/slg.md) + - [An Overview of Chalk](./traits/chalk-overview.md) + - [Bibliography](./traits/bibliography.md) +- [Type checking](./type-checking.md) + - [Method Lookup](./method-lookup.md) + - [Variance](./variance.md) + - [Existential Types](./existential-types.md) +- [The MIR (Mid-level IR)](./mir/index.md) + - [MIR construction](./mir/construction.md) + - [MIR visitor and traversal](./mir/visitor.md) + - [MIR passes: getting the MIR for a function](./mir/passes.md) + - [MIR optimizations](./mir/optimizations.md) +- [The borrow checker](./borrow_check.md) + - [Tracking moves and initialization](./borrow_check/moves_and_initialization.md) + - [Move paths](./borrow_check/moves_and_initialization/move_paths.md) + - [MIR type checker](./borrow_check/type_check.md) + - [Region inference](./borrow_check/region_inference.md) +- [Constant evaluation](./const-eval.md) + - [miri const evaluator](./miri.md) +- [Parameter Environments](./param_env.md) +- [Code Generation](./codegen.md) +- [Emitting Diagnostics](./diag.md) + +--- + +- [Appendix A: Stupid Stats](./appendix/stupid-stats.md) +- [Appendix B: Background material](./appendix/background.md) +- [Appendix C: Glossary](./appendix/glossary.md) +- [Appendix D: Code Index](./appendix/code-index.md) diff --git a/src/doc/rustc-guide/src/about-this-guide.md b/src/doc/rustc-guide/src/about-this-guide.md new file mode 100644 index 0000000000..79106736ff --- /dev/null +++ b/src/doc/rustc-guide/src/about-this-guide.md @@ -0,0 +1,14 @@ +# About this guide + +This guide is meant to help document how rustc – the Rust compiler – +works, as well as to help new contributors get involved in rustc +development. It is not meant to replace code documentation – each +chapter gives only high-level details – the kinds of things that +(ideally) don't change frequently. + +The guide itself is of course open-source as well, and the sources can +be found at the [GitHub repository]. If you find any mistakes in the +guide, please file an issue about it, or even better, open a PR +with a correction! 
+ +[GitHub repository]: https://github.com/rust-lang-nursery/rustc-guide/ diff --git a/src/doc/rustc-guide/src/appendix/background.md b/src/doc/rustc-guide/src/appendix/background.md new file mode 100644 index 0000000000..dbfe6a3dbe --- /dev/null +++ b/src/doc/rustc-guide/src/appendix/background.md @@ -0,0 +1,128 @@ +# Appendix B: Background topics + +This section covers a numbers of common compiler terms that arise in +this guide. We try to give the general definition while providing some +Rust-specific context. + + + +## What is a control-flow graph? + +A control-flow graph is a common term from compilers. If you've ever +used a flow-chart, then the concept of a control-flow graph will be +pretty familiar to you. It's a representation of your program that +exposes the underlying control flow in a very clear way. + +A control-flow graph is structured as a set of **basic blocks** +connected by edges. The key idea of a basic block is that it is a set +of statements that execute "together" – that is, whenever you branch +to a basic block, you start at the first statement and then execute +all the remainder. Only at the end of the block is there the +possibility of branching to more than one place (in MIR, we call that +final statement the **terminator**): + +```mir +bb0: { + statement0; + statement1; + statement2; + ... + terminator; +} +``` + +Many expressions that you are used to in Rust compile down to multiple +basic blocks. For example, consider an if statement: + +```rust,ignore +a = 1; +if some_variable { + b = 1; +} else { + c = 1; +} +d = 1; +``` + +This would compile into four basic blocks: + +```mir +BB0: { + a = 1; + if some_variable { goto BB1 } else { goto BB2 } +} + +BB1: { + b = 1; + goto BB3; +} + +BB2: { + c = 1; + goto BB3; +} + +BB3: { + d = 1; + ...; +} +``` + +When using a control-flow graph, a loop simply appears as a cycle in +the graph, and the `break` keyword translates into a path out of that +cycle. + + + +## What is a dataflow analysis? + +[*Static Program Analysis*](https://cs.au.dk/~amoeller/spa/) by Anders Møller +and Michael I. Schwartzbach is an incredible resource! + +*to be written* + + + +## What is "universally quantified"? What about "existentially quantified"? + +*to be written* + + + +## What is co- and contra-variance? + +Check out the subtyping chapter from the +[Rust Nomicon](https://doc.rust-lang.org/nomicon/subtyping.html). + +See the [variance](../variance.html) chapter of this guide for more info on how +the type checker handles variance. + + + +## What is a "free region" or a "free variable"? What about "bound region"? + +Let's describe the concepts of free vs bound in terms of program +variables, since that's the thing we're most familiar with. + +- Consider this expression, which creates a closure: `|a, + b| a + b`. Here, the `a` and `b` in `a + b` refer to the arguments + that the closure will be given when it is called. We say that the + `a` and `b` there are **bound** to the closure, and that the closure + signature `|a, b|` is a **binder** for the names `a` and `b` + (because any references to `a` or `b` within refer to the variables + that it introduces). +- Consider this expression: `a + b`. In this expression, `a` and `b` + refer to local variables that are defined *outside* of the + expression. We say that those variables **appear free** in the + expression (i.e., they are **free**, not **bound** (tied up)). 
+ +So there you have it: a variable "appears free" in some +expression/statement/whatever if it refers to something defined +outside of that expressions/statement/whatever. Equivalently, we can +then refer to the "free variables" of an expression – which is just +the set of variables that "appear free". + +So what does this have to do with regions? Well, we can apply the +analogous concept to type and regions. For example, in the type `&'a +u32`, `'a` appears free. But in the type `for<'a> fn(&'a u32)`, it +does not. diff --git a/src/doc/rustc-guide/src/appendix/code-index.md b/src/doc/rustc-guide/src/appendix/code-index.md new file mode 100644 index 0000000000..e1bde6863a --- /dev/null +++ b/src/doc/rustc-guide/src/appendix/code-index.md @@ -0,0 +1,44 @@ +# Appendix D: Code Index + +rustc has a lot of important data structures. This is an attempt to give some +guidance on where to learn more about some of the key data structures of the +compiler. + +Item | Kind | Short description | Chapter | Declaration +----------------|----------|-----------------------------|--------------------|------------------- +`BodyId` | struct | One of four types of HIR node identifiers | [Identifiers in the HIR] | [src/librustc/hir/mod.rs](https://doc.rust-lang.org/nightly/nightly-rustc/rustc/hir/struct.BodyId.html) +`CompileState` | struct | State that is passed to a callback at each compiler pass | [The Rustc Driver] | [src/librustc_driver/driver.rs](https://doc.rust-lang.org/nightly/nightly-rustc/rustc_driver/driver/struct.CompileState.html) +`ast::Crate` | struct | A syntax-level representation of a parsed crate | [The parser] | [src/librustc/hir/mod.rs](https://doc.rust-lang.org/nightly/nightly-rustc/syntax/ast/struct.Crate.html) +`hir::Crate` | struct | A more abstract, compiler-friendly form of a crate's AST | [The Hir] | [src/librustc/hir/mod.rs](https://doc.rust-lang.org/nightly/nightly-rustc/rustc/hir/struct.Crate.html) +`DefId` | struct | One of four types of HIR node identifiers | [Identifiers in the HIR] | [src/librustc/hir/def_id.rs](https://doc.rust-lang.org/nightly/nightly-rustc/rustc/hir/def_id/struct.DefId.html) +`DiagnosticBuilder` | struct | A struct for building up compiler diagnostics, such as errors or lints | [Emitting Diagnostics] | [src/librustc_errors/diagnostic_builder.rs](https://doc.rust-lang.org/nightly/nightly-rustc/rustc_errors/struct.DiagnosticBuilder.html) +`DocContext` | struct | A state container used by rustdoc when crawling through a crate to gather its documentation | [Rustdoc] | [src/librustdoc/core.rs](https://github.com/rust-lang/rust/blob/master/src/librustdoc/core.rs) +`HirId` | struct | One of four types of HIR node identifiers | [Identifiers in the HIR] | [src/librustc/hir/mod.rs](https://doc.rust-lang.org/nightly/nightly-rustc/rustc/hir/struct.HirId.html) +`NodeId` | struct | One of four types of HIR node identifiers. 
Being phased out | [Identifiers in the HIR] | [src/libsyntax/ast.rs](https://doc.rust-lang.org/nightly/nightly-rustc/syntax/ast/struct.NodeId.html) +`ParamEnv` | struct | Information about generic parameters or `Self`, useful for working with associated or generic items | [Parameter Environment] | [src/librustc/ty/mod.rs](https://doc.rust-lang.org/nightly/nightly-rustc/rustc/ty/struct.ParamEnv.html) +`ParseSess` | struct | This struct contains information about a parsing session | [The parser] | [src/libsyntax/parse/mod.rs](https://doc.rust-lang.org/nightly/nightly-rustc/syntax/parse/struct.ParseSess.html) +`Rib` | struct | Represents a single scope of names | [Name resolution] | [src/librustc_resolve/lib.rs](https://doc.rust-lang.org/nightly/nightly-rustc/rustc_resolve/struct.Rib.html) +`Session` | struct | The data associated with a compilation session | [The parser], [The Rustc Driver] | [src/librustc/session/mod.html](https://doc.rust-lang.org/nightly/nightly-rustc/rustc/session/struct.Session.html) +`SourceFile` | struct | Part of the `SourceMap`. Maps AST nodes to their source code for a single source file. Was previously called FileMap | [The parser] | [src/libsyntax_pos/lib.rs](https://doc.rust-lang.org/nightly/nightly-rustc/syntax/source_map/struct.SourceFile.html) +`SourceMap` | struct | Maps AST nodes to their source code. It is composed of `SourceFile`s. Was previously called CodeMap | [The parser] | [src/libsyntax/source_map.rs](https://doc.rust-lang.org/nightly/nightly-rustc/syntax/source_map/struct.SourceMap.html) +`Span` | struct | A location in the user's source code, used for error reporting primarily | [Emitting Diagnostics] | [src/libsyntax_pos/span_encoding.rs](https://doc.rust-lang.org/nightly/nightly-rustc/syntax_pos/struct.Span.html) +`StringReader` | struct | This is the lexer used during parsing. It consumes characters from the raw source code being compiled and produces a series of tokens for use by the rest of the parser | [The parser] | [src/libsyntax/parse/lexer/mod.rs](https://doc.rust-lang.org/nightly/nightly-rustc/syntax/parse/lexer/struct.StringReader.html) +`syntax::token_stream::TokenStream` | struct | An abstract sequence of tokens, organized into `TokenTree`s | [The parser], [Macro expansion] | [src/libsyntax/tokenstream.rs](https://doc.rust-lang.org/nightly/nightly-rustc/syntax/tokenstream/struct.TokenStream.html) +`TraitDef` | struct | This struct contains a trait's definition with type information | [The `ty` modules] | [src/librustc/ty/trait_def.rs](https://doc.rust-lang.org/nightly/nightly-rustc/rustc/ty/trait_def/struct.TraitDef.html) +`TraitRef` | struct | The combination of a trait and its input types (e.g. `P0: Trait`) | [Trait Solving: Goals and Clauses], [Trait Solving: Lowering impls] | [src/librustc/ty/sty.rs](https://doc.rust-lang.org/nightly/nightly-rustc/rustc/ty/struct.TraitRef.html) +`Ty<'tcx>` | struct | This is the internal representation of a type used for type checking | [Type checking] | [src/librustc/ty/mod.rs](https://doc.rust-lang.org/nightly/nightly-rustc/rustc/ty/type.Ty.html) +`TyCtxt<'cx, 'tcx, 'tcx>` | type | The "typing context". This is the central data structure in the compiler. 
It is the context that you use to perform all manner of queries | [The `ty` modules] | [src/librustc/ty/context.rs](https://doc.rust-lang.org/nightly/nightly-rustc/rustc/ty/struct.TyCtxt.html) + +[The HIR]: ../hir.html +[Identifiers in the HIR]: ../hir.html#hir-id +[The parser]: ../the-parser.html +[The Rustc Driver]: ../rustc-driver.html +[Type checking]: ../type-checking.html +[The `ty` modules]: ../ty.html +[Rustdoc]: ../rustdoc.html +[Emitting Diagnostics]: ../diag.html +[Macro expansion]: ../macro-expansion.html +[Name resolution]: ../name-resolution.html +[Parameter Environment]: ../param_env.html +[Trait Solving: Goals and Clauses]: ../traits/goals-and-clauses.html#domain-goals +[Trait Solving: Lowering impls]: ../traits/lowering-rules.html#lowering-impls diff --git a/src/doc/rustc-guide/src/appendix/glossary.md b/src/doc/rustc-guide/src/appendix/glossary.md new file mode 100644 index 0000000000..82d6f9a21a --- /dev/null +++ b/src/doc/rustc-guide/src/appendix/glossary.md @@ -0,0 +1,78 @@ +# Appendix C: Glossary + +The compiler uses a number of...idiosyncratic abbreviations and things. This +glossary attempts to list them and give you a few pointers for understanding +them better. + +Term | Meaning +------------------------|-------- +AST | the abstract syntax tree produced by the syntax crate; reflects user syntax very closely. +binder | a "binder" is a place where a variable or type is declared; for example, the `` is a binder for the generic type parameter `T` in `fn foo(..)`, and \|`a`\|` ...` is a binder for the parameter `a`. See [the background chapter for more](./background.html#free-vs-bound) +bound variable | a "bound variable" is one that is declared within an expression/term. For example, the variable `a` is bound within the closure expession \|`a`\|` a * 2`. See [the background chapter for more](./background.html#free-vs-bound) +codegen | the code to translate MIR into LLVM IR. +codegen unit | when we produce LLVM IR, we group the Rust code into a number of codegen units. Each of these units is processed by LLVM independently from one another, enabling parallelism. They are also the unit of incremental re-use. +completeness | completeness is a technical term in type theory. Completeness means that every type-safe program also type-checks. Having both soundness and completeness is very hard, and usually soundness is more important. (see "soundness"). +control-flow graph | a representation of the control-flow of a program; see [the background chapter for more](./background.html#cfg) +CTFE | Compile-Time Function Evaluation. This is the ability of the compiler to evaluate `const fn`s at compile time. This is part of the compiler's constant evaluation system. ([see more](../const-eval.html)) +cx | we tend to use "cx" as an abbreviation for context. See also `tcx`, `infcx`, etc. +DAG | a directed acyclic graph is used during compilation to keep track of dependencies between queries. ([see more](../incremental-compilation.html)) +data-flow analysis | a static analysis that figures out what properties are true at each point in the control-flow of a program; see [the background chapter for more](./background.html#dataflow) +DefId | an index identifying a definition (see `librustc/hir/def_id.rs`). Uniquely identifies a `DefPath`. +Double pointer | a pointer with additional metadata. See "fat pointer" for more. +DST | Dynamically-Sized Type. A type for which the compiler cannot statically know the size in memory (e.g. `str` or `[u8]`). 
Such types don't implement `Sized` and cannot be allocated on the stack. They can only occur as the last field in a struct. They can only be used behind a pointer (e.g. `&str` or `&[u8]`). +empty type | see "uninhabited type". +Fat pointer | a two word value carrying the address of some value, along with some further information necessary to put the value to use. Rust includes two kinds of "fat pointers": references to slices, and trait objects. A reference to a slice carries the starting address of the slice and its length. A trait object carries a value's address and a pointer to the trait's implementation appropriate to that value. "Fat pointers" are also known as "wide pointers", and "double pointers". +free variable | a "free variable" is one that is not bound within an expression or term; see [the background chapter for more](./background.html#free-vs-bound) +'gcx | the lifetime of the global arena ([see more](../ty.html)) +generics | the set of generic type parameters defined on a type or item +HIR | the High-level IR, created by lowering and desugaring the AST ([see more](../hir.html)) +HirId | identifies a particular node in the HIR by combining a def-id with an "intra-definition offset". +HIR Map | The HIR map, accessible via tcx.hir, allows you to quickly navigate the HIR and convert between various forms of identifiers. +ICE | internal compiler error. When the compiler crashes. +ICH | incremental compilation hash. ICHs are used as fingerprints for things such as HIR and crate metadata, to check if changes have been made. This is useful in incremental compilation to see if part of a crate has changed and should be recompiled. +inference variable | when doing type or region inference, an "inference variable" is a kind of special type/region that represents what you are trying to infer. Think of X in algebra. For example, if we are trying to infer the type of a variable in a program, we create an inference variable to represent that unknown type. +infcx | the inference context (see `librustc/infer`) +IR | Intermediate Representation. A general term in compilers. During compilation, the code is transformed from raw source (ASCII text) to various IRs. In Rust, these are primarily HIR, MIR, and LLVM IR. Each IR is well-suited for some set of computations. For example, MIR is well-suited for the borrow checker, and LLVM IR is well-suited for codegen because LLVM accepts it. +local crate | the crate currently being compiled. +LTO | Link-Time Optimizations. A set of optimizations offered by LLVM that occur just before the final binary is linked. These include optimizations like removing functions that are never used in the final program, for example. _ThinLTO_ is a variant of LTO that aims to be a bit more scalable and efficient, but possibly sacrifices some optimizations. You may also read issues in the Rust repo about "FatLTO", which is the loving nickname given to non-Thin LTO. LLVM documentation: [here][lto] and [here][thinlto] +[LLVM] | (actually not an acronym :P) an open-source compiler backend. It accepts LLVM IR and outputs native binaries. Various languages (e.g. Rust) can then implement a compiler front-end that output LLVM IR and use LLVM to compile to all the platforms LLVM supports. 
+MIR | the Mid-level IR that is created after type-checking for use by borrowck and codegen ([see more](../mir/index.html)) +miri | an interpreter for MIR used for constant evaluation ([see more](../miri.html)) +normalize | a general term for converting to a more canonical form, but in the case of rustc typically refers to [associated type normalization](../traits/associated-types.html#normalize) +newtype | a "newtype" is a wrapper around some other type (e.g., `struct Foo(T)` is a "newtype" for `T`). This is commonly used in Rust to give a stronger type for indices. +NLL | [non-lexical lifetimes](../borrow_check/region_inference.html), an extension to Rust's borrowing system to make it be based on the control-flow graph. +node-id or NodeId | an index identifying a particular node in the AST or HIR; gradually being phased out and replaced with `HirId`. +obligation | something that must be proven by the trait system ([see more](../traits/resolution.html)) +projection | a general term for a "relative path", e.g. `x.f` is a "field projection", and `T::Item` is an ["associated type projection"](../traits/goals-and-clauses.html#trait-ref) +promoted constants | constants extracted from a function and lifted to static scope; see [this section](../mir/index.html#promoted) for more details. +provider | the function that executes a query ([see more](../query.html)) +quantified | in math or logic, existential and universal quantification are used to ask questions like "is there any type T for which is true?" or "is this true for all types T?"; see [the background chapter for more](./background.html#quantified) +query | perhaps some sub-computation during compilation ([see more](../query.html)) +region | another term for "lifetime" often used in the literature and in the borrow checker. +rib | a data structure in the name resolver that keeps track of a single scope for names. ([see more](../name-resolution.html)) +sess | the compiler session, which stores global data used throughout compilation +side tables | because the AST and HIR are immutable once created, we often carry extra information about them in the form of hashtables, indexed by the id of a particular node. +sigil | like a keyword but composed entirely of non-alphanumeric tokens. For example, `&` is a sigil for references. +placeholder | **NOTE: skolemization is deprecated by placeholder** a way of handling subtyping around "for-all" types (e.g., `for<'a> fn(&'a u32)`) as well as solving higher-ranked trait bounds (e.g., `for<'a> T: Trait<'a>`). See [the chapter on placeholder and universes](../borrow_check/region_inference.html#placeholder) for more details. +soundness | soundness is a technical term in type theory. Roughly, if a type system is sound, then if a program type-checks, it is type-safe; i.e. I can never (in safe rust) force a value into a variable of the wrong type. (see "completeness"). +span | a location in the user's source code, used for error reporting primarily. These are like a file-name/line-number/column tuple on steroids: they carry a start/end point, and also track macro expansions and compiler desugaring. All while being packed into a few bytes (really, it's an index into a table). See the Span datatype for more. +substs | the substitutions for a given generic type or item (e.g. 
the `i32`, `u32` in `HashMap`) +tcx | the "typing context", main data structure of the compiler ([see more](../ty.html)) +'tcx | the lifetime of the currently active inference context ([see more](../ty.html)) +trait reference | the name of a trait along with a suitable set of input type/lifetimes ([see more](../traits/goals-and-clauses.html#trait-ref)) +token | the smallest unit of parsing. Tokens are produced after lexing ([see more](../the-parser.html)). +[TLS] | Thread-Local Storage. Variables may be defined so that each thread has its own copy (rather than all threads sharing the variable). This has some interactions with LLVM. Not all platforms support TLS. +trans | the code to translate MIR into LLVM IR. Renamed to codegen. +trait reference | a trait and values for its type parameters ([see more](../ty.html)). +ty | the internal representation of a type ([see more](../ty.html)). +UFCS | Universal Function Call Syntax. An unambiguous syntax for calling a method ([see more](../type-checking.html)). +uninhabited type | a type which has _no_ values. This is not the same as a ZST, which has exactly 1 value. An example of an uninhabited type is `enum Foo {}`, which has no variants, and so, can never be created. The compiler can treat code that deals with uninhabited types as dead code, since there is no such value to be manipulated. `!` (the never type) is an uninhabited type. Uninhabited types are also called "empty types". +upvar | a variable captured by a closure from outside the closure. +variance | variance determines how changes to a generic type/lifetime parameter affect subtyping; for example, if `T` is a subtype of `U`, then `Vec` is a subtype `Vec` because `Vec` is *covariant* in its generic parameter. See [the background chapter](./background.html#variance) for a more general explanation. See the [variance chapter](../variance.html) for an explanation of how type checking handles variance. +Wide pointer | a pointer with additional metadata. See "fat pointer" for more. +ZST | Zero-Sized Type. A type whose values have size 0 bytes. Since `2^0 = 1`, such types can have exactly one value. For example, `()` (unit) is a ZST. `struct Foo;` is also a ZST. The compiler can do some nice optimizations around ZSTs. + +[LLVM]: https://llvm.org/ +[lto]: https://llvm.org/docs/LinkTimeOptimization.html +[thinlto]: https://clang.llvm.org/docs/ThinLTO.html +[TLS]: https://llvm.org/docs/LangRef.html#thread-local-storage-models diff --git a/src/doc/rustc-guide/src/appendix/stupid-stats.md b/src/doc/rustc-guide/src/appendix/stupid-stats.md new file mode 100644 index 0000000000..a36cac42ba --- /dev/null +++ b/src/doc/rustc-guide/src/appendix/stupid-stats.md @@ -0,0 +1,412 @@ +# Appendix A: A tutorial on creating a drop-in replacement for rustc + +> **Note:** This is a copy of `@nrc`'s amazing [stupid-stats]. You should find +> a copy of the code on the GitHub repository although due to the compiler's +> constantly evolving nature, there is no guarantee it'll compile on the first +> go. + +Many tools benefit from being a drop-in replacement for a compiler. By this, I +mean that any user of the tool can use `mytool` in all the ways they would +normally use `rustc` - whether manually compiling a single file or as part of a +complex make project or Cargo build, etc. That could be a lot of work; +rustc, like most compilers, takes a large number of command line arguments which +can affect compilation in complex and interacting ways. 
Emulating all of this behaviour in your tool is annoying at best, especially if
+you are making many of the same calls into librustc that the compiler is.
+
+The kinds of things I have in mind are tools like rustdoc or a future rustfmt.
+These want to operate as closely as possible to real compilation, but have
+totally different outputs (documentation and formatted source code,
+respectively). Another use case is a customised compiler. Say you want to add a
+custom code generation phase after macro expansion; creating a new tool should
+then be easier than forking the compiler (and keeping it up to date as the
+compiler evolves).
+
+I have gradually been trying to improve the API of librustc to make creating a
+drop-in tool easier (many others have also helped improve these interfaces over
+the same time frame). It is now pretty simple to make a tool which is as close
+to rustc as you want it to be. In this tutorial I'll show how.
+
+Note/warning: everything I talk about in this tutorial is internal API for
+rustc. It is all extremely unstable and likely to change often and in
+unpredictable ways. Maintaining a tool which uses these APIs will be
+non-trivial, although hopefully easier than maintaining one that does similar
+things without using them.
+
+This tutorial starts with a very high level view of the rustc compilation
+process and of some of the code that drives compilation. Then I'll describe how
+that process can be customised. In the final section of the tutorial, I'll go
+through an example - stupid-stats - which shows how to build a drop-in tool.
+
+
+## Overview of the compilation process
+
+Compilation using rustc happens in several phases. We start with parsing, which
+includes lexing. The output of this phase is an AST (abstract syntax tree).
+There is a single AST for each crate (indeed, the entire compilation process
+operates over a single crate). Parsing abstracts away details about individual
+files, which will all have been read into the AST in this phase. At this stage
+the AST still includes all macro uses and attributes, and nothing will have
+been eliminated due to `cfg`s.
+
+The next phase is configuration and macro expansion. This can be thought of as a
+function over the AST. The unexpanded AST goes in and an expanded AST comes out.
+Macros and syntax extensions are expanded, and `cfg` attributes will cause some
+code to disappear. The resulting AST won't have any macros or macro uses left
+in.
+
+The code for these first two phases is in [libsyntax](https://github.com/rust-lang/rust/tree/master/src/libsyntax).
+
+After this phase, the compiler allocates ids to each node in the AST
+(technically not every node, but most of them). If we are writing out
+dependencies, that happens now.
+
+The next big phase is analysis. This is the most complex phase and uses the
+bulk of the code in rustc. It includes name resolution, type checking, borrow
+checking, type and lifetime inference, trait selection, method selection,
+linting, and so forth. Most error detection is done in this phase (although
+parse errors are found during parsing). The 'output' of this phase is a bunch
+of side tables containing semantic information about the source program. The
+analysis code is in [librustc](https://github.com/rust-lang/rust/tree/master/src/librustc)
+and a bunch of other crates with the 'librustc_' prefix.
+
+Next is translation, which translates the AST (and all those side tables) into
+LLVM IR (intermediate representation).
We do this by calling into the LLVM +libraries, rather than actually writing IR directly to a file. The code for +this is in librustc_trans. + +The next phase is running the LLVM backend. This runs LLVM's optimisation passes +on the generated IR and then generates machine code. The result is object files. +This phase is all done by LLVM, it is not really part of the rust compiler. The +interface between LLVM and rustc is in [librustc_llvm](https://github.com/rust-lang/rust/tree/master/src/librustc_llvm). + +Finally, we link the object files into an executable. Again we outsource this to +other programs and it's not really part of the rust compiler. The interface is +in librustc_back (which also contains some things used primarily during +translation). + +> NOTE: `librustc_trans` and `librustc_back` no longer exist, and we don't +> translate AST or HIR directly to LLVM IR anymore. Instead, see +> [`librustc_codegen_llvm`](https://doc.rust-lang.org/nightly/nightly-rustc/rustc_codegen_llvm/index.html) +> and [`librustc_codegen_utils`](https://doc.rust-lang.org/nightly/nightly-rustc/rustc_codegen_utils/index.html). + +All these phases are coordinated by the driver. To see the exact sequence, look +at [the `compile_input` function in `librustc_driver`][compile-input]. +The driver handles all the highest level coordination of compilation - + 1. handling command-line arguments + 2. maintaining compilation state (primarily in the `Session`) + 3. calling the appropriate code to run each phase of compilation + 4. handles high level coordination of pretty printing and testing. +To create a drop-in compiler replacement or a compiler replacement, +we leave most of compilation alone and customise the driver using its APIs. + +[compile-input]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_driver/driver/fn.compile_input.html + + +## The driver customisation APIs + +There are two primary ways to customise compilation - high level control of the +driver using `CompilerCalls` and controlling each phase of compilation using a +`CompileController`. The former lets you customise handling of command line +arguments etc., the latter lets you stop compilation early or execute code +between phases. + + +### `CompilerCalls` + +`CompilerCalls` is a trait that you implement in your tool. It contains a fairly +ad-hoc set of methods to hook in to the process of processing command line +arguments and driving the compiler. For details, see the comments in +[librustc_driver/lib.rs](https://doc.rust-lang.org/nightly/nightly-rustc/rustc_driver/index.html). +I'll summarise the methods here. + +`early_callback` and `late_callback` let you call arbitrary code at different +points - early is after command line arguments have been parsed, but before +anything is done with them; late is pretty much the last thing before +compilation starts, i.e., after all processing of command line arguments, etc. +is done. Currently, you get to choose whether compilation stops or continues at +each point, but you don't get to change anything the driver has done. You can +record some info for later, or perform other actions of your own. + +`some_input` and `no_input` give you an opportunity to modify the primary input +to the compiler (usually the input is a file containing the top module for a +crate, but it could also be a string). You could record the input or perform +other actions of your own. + +Ignore `parse_pretty`, it is unfortunate and hopefully will get improved. 
There +is a default implementation, so you can pretend it doesn't exist. + +`build_controller` returns a `CompileController` object for more fine-grained +control of compilation, it is described next. + +We might add more options in the future. + + +### `CompilerController` + +`CompilerController` is a struct consisting of `PhaseController`s and flags. +Currently, there is only flag, `make_glob_map` which signals whether to produce +a map of glob imports (used by save-analysis and potentially other tools). There +are probably flags in the session that should be moved here. + +There is a `PhaseController` for each of the phases described in the above +summary of compilation (and we could add more in the future for finer-grained +control). They are all `after_` a phase because they are checked at the end of a +phase (again, that might change), e.g., `CompilerController::after_parse` +controls what happens immediately after parsing (and before macro expansion). + +Each `PhaseController` contains a flag called `stop` which indicates whether +compilation should stop or continue, and a callback to be executed at the point +indicated by the phase. The callback is called whether or not compilation +continues. + +Information about the state of compilation is passed to these callbacks in a +`CompileState` object. This contains all the information the compiler has. Note +that this state information is immutable - your callback can only execute code +using the compiler state, it can't modify the state. (If there is demand, we +could change that). The state available to a callback depends on where during +compilation the callback is called. For example, after parsing there is an AST +but no semantic analysis (because the AST has not been analysed yet). After +translation, there is translation info, but no AST or analysis info (since these +have been consumed/forgotten). + + +## An example - stupid-stats + +Our example tool is very simple, it simply collects some simple and not very +useful statistics about a program; it is called stupid-stats. You can find +the (more heavily commented) complete source for the example on [Github](https://github.com/nick29581/stupid-stats/blob/master/src). +To build, just do `cargo build`. To run on a file `foo.rs`, do `cargo run +foo.rs` (assuming you have a Rust program called `foo.rs`. You can also pass any +command line arguments that you would normally pass to rustc). When you run it +you'll see output similar to + +```text +In crate: foo, + +Found 12 uses of `println!`; +The most common number of arguments is 1 (67% of all functions); +25% of functions have four or more arguments. +``` + +To make things easier, when we talk about functions, we're excluding methods and +closures. + +You can also use the executable as a drop-in replacement for rustc, because +after all, that is the whole point of this exercise. So, however you use rustc +in your makefile setup, you can use `target/stupid` (or whatever executable you +end up with) instead. That might mean setting an environment variable or it +might mean renaming your executable to `rustc` and setting your PATH. Similarly, +if you're using Cargo, you'll need to rename the executable to rustc and set the +PATH. Alternatively, you should be able to use +[multirust](https://github.com/brson/multirust) to get around all the PATH stuff +(although I haven't actually tried that). + +(Note that this example prints to stdout. I'm not entirely sure what Cargo does +with stdout from rustc under different circumstances. 
If you don't see any +output, try inserting a `panic!` after the `println!`s to error out, then Cargo +should dump stupid-stats' stdout to Cargo's stdout). + +Let's start with the `main` function for our tool, it is pretty simple: + +```rust,ignore +fn main() { + let args: Vec<_> = std::env::args().collect(); + rustc_driver::run_compiler(&args, &mut StupidCalls::new()); + std::env::set_exit_status(0); +} +``` + +The first line grabs any command line arguments. The second line calls the +compiler driver with those arguments. The final line sets the exit code for the +program. + +The only interesting thing is the `StupidCalls` object we pass to the driver. +This is our implementation of the `CompilerCalls` trait and is what will make +this tool different from rustc. + +`StupidCalls` is a mostly empty struct: + +```rust,ignore +struct StupidCalls { + default_calls: RustcDefaultCalls, +} +``` + +This tool is so simple that it doesn't need to store any data here, but usually +you would. We embed a `RustcDefaultCalls` object to delegate to in our impl when +we want exactly the same behaviour as the Rust compiler. Mostly you don't want +to do that (or at least don't need to) in a tool. However, Cargo calls rustc +with the `--print file-names`, so we delegate in `late_callback` and `no_input` +to keep Cargo happy. + +Most of the rest of the impl of `CompilerCalls` is trivial: + +```rust,ignore +impl<'a> CompilerCalls<'a> for StupidCalls { + fn early_callback(&mut self, + _: &getopts::Matches, + _: &config::Options, + _: &diagnostics::registry::Registry, + _: ErrorOutputType) + -> Compilation { + Compilation::Continue + } + + fn late_callback(&mut self, + t: &TransCrate, + m: &getopts::Matches, + s: &Session, + c: &CrateStore, + i: &Input, + odir: &Option, + ofile: &Option) + -> Compilation { + self.default_calls.late_callback(t, m, s, c, i, odir, ofile); + Compilation::Continue + } + + fn some_input(&mut self, + input: Input, + input_path: Option) + -> (Input, Option) { + (input, input_path) + } + + fn no_input(&mut self, + m: &getopts::Matches, + o: &config::Options, + odir: &Option, + ofile: &Option, + r: &diagnostics::registry::Registry) + -> Option<(Input, Option)> { + self.default_calls.no_input(m, o, odir, ofile, r); + + // This is not optimal error handling. + panic!("No input supplied to stupid-stats"); + } + + fn build_controller(&mut self, _: &Session) -> driver::CompileController<'a> { + ... + } +} +``` + +We don't do anything for either of the callbacks, nor do we change the input if +the user supplies it. If they don't, we just `panic!`, this is the simplest way +to handle the error, but not very user-friendly, a real tool would give a +constructive message or perform a default action. + +In `build_controller` we construct our `CompileController`. We only want to +parse, and we want to inspect macros before expansion, so we make compilation +stop after the first phase (parsing). The callback after that phase is where the +tool does it's actual work by walking the AST. We do that by creating an AST +visitor and making it walk the AST from the top (the crate root). Once we've +walked the crate, we print the stats we've collected: + +```rust,ignore +fn build_controller(&mut self, _: &Session) -> driver::CompileController<'a> { + // We mostly want to do what rustc does, which is what basic() will return. + let mut control = driver::CompileController::basic(); + // But we only need the AST, so we can stop compilation after parsing. 
+ control.after_parse.stop = Compilation::Stop; + + // And when we stop after parsing we'll call this closure. + // Note that this will give us an AST before macro expansions, which is + // not usually what you want. + control.after_parse.callback = box |state| { + // Which extracts information about the compiled crate... + let krate = state.krate.unwrap(); + + // ...and walks the AST, collecting stats. + let mut visitor = StupidVisitor::new(); + visit::walk_crate(&mut visitor, krate); + + // And finally prints out the stupid stats that we collected. + let cratename = match attr::find_crate_name(&krate.attrs[]) { + Some(name) => name.to_string(), + None => String::from_str("unknown_crate"), + }; + println!("In crate: {},\n", cratename); + println!("Found {} uses of `println!`;", visitor.println_count); + + let (common, common_percent, four_percent) = visitor.compute_arg_stats(); + println!("The most common number of arguments is {} ({:.0}% of all functions);", + common, common_percent); + println!("{:.0}% of functions have four or more arguments.", four_percent); + }; + + control +} +``` + +That is all it takes to create your own drop-in compiler replacement or custom +compiler! For the sake of completeness I'll go over the rest of the stupid-stats +tool. + +```rust +struct StupidVisitor { + println_count: usize, + arg_counts: Vec, +} +``` + +The `StupidVisitor` struct just keeps track of the number of `println!`s it has +seen and the count for each number of arguments. It implements +`syntax::visit::Visitor` to walk the AST. Mostly we just use the default +methods, these walk the AST taking no action. We override `visit_item` and +`visit_mac` to implement custom behaviour when we walk into items (items include +functions, modules, traits, structs, and so forth, we're only interested in +functions) and macros: + +```rust,ignore +impl<'v> visit::Visitor<'v> for StupidVisitor { + fn visit_item(&mut self, i: &'v ast::Item) { + match i.node { + ast::Item_::ItemFn(ref decl, _, _, _, _) => { + // Record the number of args. + self.increment_args(decl.inputs.len()); + } + _ => {} + } + + // Keep walking. + visit::walk_item(self, i) + } + + fn visit_mac(&mut self, mac: &'v ast::Mac) { + // Find its name and check if it is "println". + let ast::Mac_::MacInvocTT(ref path, _, _) = mac.node; + if path_to_string(path) == "println" { + self.println_count += 1; + } + + // Keep walking. + visit::walk_mac(self, mac) + } +} +``` + +The `increment_args` method increments the correct count in +`StupidVisitor::arg_counts`. After we're done walking, `compute_arg_stats` does +some pretty basic maths to come up with the stats we want about arguments. + + +## What next? + +These APIs are pretty new and have a long way to go until they're really good. +If there are improvements you'd like to see or things you'd like to be able to +do, let me know in a comment or [GitHub issue](https://github.com/rust-lang/rust/issues). +In particular, it's not clear to me exactly what extra flexibility is required. +If you have an existing tool that would be suited to this setup, please try it +out and let me know if you have problems. + +It'd be great to see Rustdoc converted to using these APIs, if that is possible +(although long term, I'd prefer to see Rustdoc run on the output from save- +analysis, rather than doing its own analysis). Other parts of the compiler +(e.g., pretty printing, testing) could be refactored to use these APIs +internally (I already changed save-analysis to use `CompilerController`). 
I've +been experimenting with a prototype rustfmt which also uses these APIs. + +[stupid-stats]: https://github.com/nrc/stupid-stats diff --git a/src/doc/rustc-guide/src/borrow_check.md b/src/doc/rustc-guide/src/borrow_check.md new file mode 100644 index 0000000000..40858b1b4a --- /dev/null +++ b/src/doc/rustc-guide/src/borrow_check.md @@ -0,0 +1,63 @@ +# MIR borrow check + +The borrow check is Rust's "secret sauce" – it is tasked with +enforcing a number of properties: + +- That all variables are initialized before they are used. +- That you can't move the same value twice. +- That you can't move a value while it is borrowed. +- That you can't access a place while it is mutably borrowed (except through + the reference). +- That you can't mutate a place while it is shared borrowed. +- etc + +At the time of this writing, the code is in a state of transition. The +"main" borrow checker still works by processing [the HIR](hir.html), +but that is being phased out in favor of the MIR-based borrow checker. +Accordingly, this documentation focuses on the new, MIR-based borrow +checker. + +Doing borrow checking on MIR has several advantages: + +- The MIR is *far* less complex than the HIR; the radical desugaring + helps prevent bugs in the borrow checker. (If you're curious, you + can see + [a list of bugs that the MIR-based borrow checker fixes here][47366].) +- Even more importantly, using the MIR enables ["non-lexical lifetimes"][nll], + which are regions derived from the control-flow graph. + +[47366]: https://github.com/rust-lang/rust/issues/47366 +[nll]: http://rust-lang.github.io/rfcs/2094-nll.html + +### Major phases of the borrow checker + +The borrow checker source is found in +[the `rustc_mir::borrow_check` module][b_c]. The main entry point is +the [`mir_borrowck`] query. + +[b_c]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_mir/borrow_check/index.html +[`mir_borrowck`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_mir/borrow_check/fn.mir_borrowck.html + +- We first create a **local copy** of the MIR. In the coming steps, + we will modify this copy in place to modify the types and things to + include references to the new regions that we are computing. +- We then invoke [`replace_regions_in_mir`] to modify our local MIR. + Among other things, this function will replace all of the [regions](./appendix/glossary.html) in + the MIR with fresh [inference variables](./appendix/glossary.html). +- Next, we perform a number of + [dataflow analyses](./appendix/background.html#dataflow) that + compute what data is moved and when. +- We then do a [second type check](borrow_check/type_check.html) across the MIR: + the purpose of this type check is to determine all of the constraints between + different regions. +- Next, we do [region inference](borrow_check/region_inference.html), which computes + the values of each region — basically, points in the control-flow graph. +- At this point, we can compute the "borrows in scope" at each point. +- Finally, we do a second walk over the MIR, looking at the actions it + does and reporting errors. For example, if we see a statement like + `*a + 1`, then we would check that the variable `a` is initialized + and that it is not mutably borrowed, as either of those would + require an error to be reported. + - Doing this check requires the results of all the previous analyses. 
+ +[`replace_regions_in_mir`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_mir/borrow_check/nll/fn.replace_regions_in_mir.html diff --git a/src/doc/rustc-guide/src/borrow_check/moves_and_initialization.md b/src/doc/rustc-guide/src/borrow_check/moves_and_initialization.md new file mode 100644 index 0000000000..d1530d6c09 --- /dev/null +++ b/src/doc/rustc-guide/src/borrow_check/moves_and_initialization.md @@ -0,0 +1,50 @@ +# Tracking moves and initialization + +Part of the borrow checker's job is to track which variables are +"initialized" at any given point in time -- this also requires +figuring out where moves occur and tracking those. + +## Initialization and moves + +From a user's perspective, initialization -- giving a variable some +value -- and moves -- transfering ownership to another place -- might +seem like distinct topics. Indeed, our borrow checker error messages +often talk about them differently. But **within the borrow checker**, +they are not nearly as separate. Roughly speaking, the borrow checker +tracks the set of "initialized places" at any point in the source +code. Assigning to a previously uninitialized local variable adds it +to that set; moving from a local variable removes it from that set. + +Consider this example: + +```rust,ignore +fn foo() { + let a: Vec; + + // a is not initialized yet + + a = vec![22]; + + // a is initialized here + + std::mem::drop(a); // a is moved here + + // a is no longer initialized here + + let l = a.len(); //~ ERROR +} +``` + +Here you can see that `a` starts off as uninitialized; once it is +assigned, it becomes initialized. But when `drop(a)` is called, that +moves `a` into the call, and hence it becomes uninitialized again. + +## Subsections + +To make it easier to peruse, this section is broken into a number of +subsections: + +- [Move paths](./moves_and_initialization/move_paths.html) the + *move path* concept that we use to track which local variables (or parts of + local variables, in some cases) are initialized. +- TODO *Rest not yet written* =) diff --git a/src/doc/rustc-guide/src/borrow_check/moves_and_initialization/move_paths.md b/src/doc/rustc-guide/src/borrow_check/moves_and_initialization/move_paths.md new file mode 100644 index 0000000000..8fd7b3f196 --- /dev/null +++ b/src/doc/rustc-guide/src/borrow_check/moves_and_initialization/move_paths.md @@ -0,0 +1,129 @@ +# Move paths + +In reality, it's not enough to track initialization at the granularity +of local variables. Rust also allows us to do moves and initialization +at the field granularity: + +```rust,ignore +fn foo() { + let a: (Vec, Vec) = (vec![22], vec![44]); + + // a.0 and a.1 are both initialized + + let b = a.0; // moves a.0 + + // a.0 is not initializd, but a.1 still is + + let c = a.0; // ERROR + let d = a.1; // OK +} +``` + +To handle this, we track initialization at the granularity of a **move +path**. A [`MovePath`] represents some location that the user can +initialize, move, etc. So e.g. there is a move-path representing the +local variable `a`, and there is a move-path representing `a.0`. Move +paths roughly correspond to the concept of a [`Place`] from MIR, but +they are indexed in ways that enable us to do move analysis more +efficiently. 
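+
+As a rough mental model of what "indexed" means here (this is a simplified
+sketch, *not* the actual `rustc_mir` definitions), you can picture all of the
+move paths for a function stored in one flat vector, with each path referring
+to its parent by index rather than by pointer:
+
+```rust,ignore
+// Hypothetical sketch only; see the links below for the real types.
+struct MovePathIndex(usize); // just an index into `MoveData::move_paths`
+
+struct MovePath {
+    // The path one level up, e.g. the path for `a` is the parent of `a.0`.
+    parent: Option<MovePathIndex>,
+    // The MIR place this path corresponds to.
+    place: Place,
+}
+
+struct MoveData {
+    // Indexing by `MovePathIndex` is cheap, and indices can be stored
+    // compactly in bitsets during the dataflow analyses.
+    move_paths: Vec<MovePath>,
+}
+```
+
+The actual definitions (linked just below) carry more information, such as the
+links used to walk the tree of paths described in the "Cross-references"
+section, but the index-based layout is the key idea.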
+ +[`MovePath`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_mir/dataflow/move_paths/struct.MovePath.html +[`Place`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc/mir/enum.Place.html + +## Move path indices + +Although there is a [`MovePath`] data structure, they are never +referenced directly. Instead, all the code passes around *indices* of +type +[`MovePathIndex`](https://doc.rust-lang.org/nightly/nightly-rustc/rustc_mir/dataflow/move_paths/indexes/struct.MovePathIndex.html). If +you need to get information about a move path, you use this index with +the [`move_paths` field of the `MoveData`][move_paths]. For example, +to convert a [`MovePathIndex`] `mpi` into a MIR [`Place`], you might +access the [`MovePath::place`] field like so: + +```rust,ignore +move_data.move_paths[mpi].place +``` + +[move_paths]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_mir/dataflow/move_paths/struct.MoveData.html#structfield.move_paths +[`MovePath::place`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_mir/dataflow/move_paths/struct.MovePath.html#structfield.place +[`MovePathIndex`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_mir/dataflow/move_paths/indexes/struct.MovePathIndex.html + +## Building move paths + +One of the first things we do in the MIR borrow check is to construct +the set of move paths. This is done as part of the +[`MoveData::gather_moves`] function. This function uses a MIR visitor +called [`Gatherer`] to walk the MIR and look at how each [`Place`] +within is accessed. For each such [`Place`], it constructs a +corresponding [`MovePathIndex`]. It also records when/where that +particular move path is moved/initialized, but we'll get to that in a +later section. + +[`Gatherer`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_mir/dataflow/move_paths/builder/struct.Gatherer.html +[`MoveData::gather_moves`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_mir/dataflow/move_paths/struct.MoveData.html#method.gather_moves + +### Illegal move paths + +We don't actually create a move-path for **every** [`Place`] that gets +used. In particular, if it is illegal to move from a [`Place`], then +there is no need for a [`MovePathIndex`]. Some examples: + +- You cannot move from a static variable, so we do not create a [`MovePathIndex`] + for static variables. +- You cannot move an individual element of an array, so if we have e.g. `foo: [String; 3]`, + there would be no move-path for `foo[1]`. +- You cannot move from inside of a borrowed reference, so if we have e.g. `foo: &String`, + there would be no move-path for `*foo`. + +These rules are enforced by the [`move_path_for`] function, which +converts a [`Place`] into a [`MovePathIndex`] -- in error cases like +those just discussed, the function returns an `Err`. This in turn +means we don't have to bother tracking whether those places are +initialized (which lowers overhead). + +[`move_path_for`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_mir/dataflow/move_paths/builder/struct.Gatherer.html#method.move_path_for + +## Looking up a move-path + +If you have a [`Place`] and you would like to convert it to a [`MovePathIndex`], you +can do that using the [`MovePathLookup`] structure found in the [`rev_lookup`] field +of [`MoveData`]. 
There are two different methods: + +[`MovePathLookup`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_mir/dataflow/move_paths/struct.MovePathLookup.html +[`rev_lookup`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_mir/dataflow/move_paths/struct.MoveData.html#structfield.rev_lookup + +- [`find_local`], which takes a [`mir::Local`] representing a local + variable. This is the easier method, because we **always** create a + [`MovePathIndex`] for every local variable. +- [`find`], which takes an arbitrary [`Place`]. This method is a bit + more annoying to use, precisely because we don't have a + [`MovePathIndex`] for **every** [`Place`] (as we just discussed in + the "illegal move paths" section). Therefore, [`find`] returns a + [`LookupResult`] indicating the closest path it was able to find + that exists (e.g., for `foo[1]`, it might return just the path for + `foo`). + +[`find`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_mir/dataflow/move_paths/struct.MovePathLookup.html#method.find +[`find_local`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_mir/dataflow/move_paths/struct.MovePathLookup.html#method.find_local +[`mir::Local`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc/mir/struct.Local.html +[`LookupResult`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_mir/dataflow/move_paths/enum.LookupResult.html + +## Cross-references + +As we noted above, move-paths are stored in a big vector and +referenced via their [`MovePathIndex`]. However, within this vector, +they are also structured into a tree. So for example if you have the +[`MovePathIndex`] for `a.b.c`, you can go to its parent move-path +`a.b`. You can also iterate over all children paths: so, from `a.b`, +you might iterate to find the path `a.b.c` (here you are iterating +just over the paths that are **actually referenced** in the source, +not all **possible** paths that could have been referenced). These +references are used for example in the [`has_any_child_of`] function, +which checks whether the dataflow results contain a value for the +given move-path (e.g., `a.b`) or any child of that move-path (e.g., +`a.b.c`). + +[`Place`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc/mir/enum.Place.html +[`has_any_child_of`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_mir/dataflow/at_location/struct.FlowAtLocation.html#method.has_any_child_of + diff --git a/src/doc/rustc-guide/src/borrow_check/region_inference.md b/src/doc/rustc-guide/src/borrow_check/region_inference.md new file mode 100644 index 0000000000..95c2bc8044 --- /dev/null +++ b/src/doc/rustc-guide/src/borrow_check/region_inference.md @@ -0,0 +1,536 @@ +# Region inference (NLL) + +The MIR-based region checking code is located in +[the `rustc_mir::borrow_check::nll` module][nll]. (NLL, of course, +stands for "non-lexical lifetimes", a term that will hopefully be +deprecated once they become the standard kind of lifetime.) + +[nll]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_mir/borrow_check/nll/index.html + +The MIR-based region analysis consists of two major functions: + +- `replace_regions_in_mir`, invoked first, has two jobs: + - First, it finds the set of regions that appear within the + signature of the function (e.g., `'a` in `fn foo<'a>(&'a u32) { + ... }`). These are called the "universal" or "free" regions – in + particular, they are the regions that [appear free][fvb] in the + function body. + - Second, it replaces all the regions from the function body with + fresh inference variables. 
This is because (presently) those + regions are the results of lexical region inference and hence are + not of much interest. The intention is that – eventually – they + will be "erased regions" (i.e., no information at all), since we + won't be doing lexical region inference at all. +- `compute_regions`, invoked second: this is given as argument the + results of move analysis. It has the job of computing values for all + the inference variables that `replace_regions_in_mir` introduced. + - To do that, it first runs the [MIR type checker](#mirtypeck). This + is basically a normal type-checker but specialized to MIR, which + is much simpler than full Rust of course. Running the MIR type + checker will however create **outlives constraints** between + region variables (e.g., that one variable must outlive another + one) to reflect the subtyping relationships that arise. + - It also adds **liveness constraints** that arise from where variables + are used. + - More details to come, though the [NLL RFC] also includes fairly thorough + (and hopefully readable) coverage. + +[fvb]: ../appendix/background.html#free-vs-bound +[NLL RFC]: http://rust-lang.github.io/rfcs/2094-nll.html + +## Universal regions + +*to be written* – explain the `UniversalRegions` type + +## Region variables and constraints + +*to be written* – describe the `RegionInferenceContext` and +the role of `liveness_constraints` vs other `constraints`, plus + +## Closures + +*to be written* + + + +## The MIR type-check + +## Representing the "values" of a region variable + +The value of a region can be thought of as a **set**; we call the +domain of this set a `RegionElement`. In the code, the value for all +regions is maintained in +[the `rustc_mir::borrow_check::nll::region_infer` module][ri]. For +each region we maintain a set storing what elements are present in its +value (to make this efficient, we give each kind of element an index, +the `RegionElementIndex`, and use sparse bitsets). + +[ri]: https://github.com/rust-lang/rust/tree/master/src/librustc_mir/borrow_check/nll/region_infer/ + +The kinds of region elements are as follows: + +- Each **location** in the MIR control-flow graph: a location is just + the pair of a basic block and an index. This identifies the point + **on entry** to the statement with that index (or the terminator, if + the index is equal to `statements.len()`). +- There is an element `end('a)` for each universal region `'a`, + corresponding to some portion of the caller's (or caller's caller, + etc) control-flow graph. +- Similarly, there is an element denoted `end('static)` corresponding + to the remainder of program execution after this function returns. +- There is an element `!1` for each placeholder region `!1`. This + corresponds (intuitively) to some unknown set of other elements – + for details on placeholders, see the section + [placeholders and universes](#placeholder). + +## Causal tracking + +*to be written* – describe how we can extend the values of a variable + with causal tracking etc + + + +## Placeholders and universes + +(This section describes ongoing work that hasn't landed yet.) + +From time to time we have to reason about regions that we can't +concretely know. 
For example, consider this program: + +```rust,ignore +// A function that needs a static reference +fn foo(x: &'static u32) { } + +fn bar(f: for<'a> fn(&'a u32)) { + // ^^^^^^^^^^^^^^^^^^^ a function that can accept **any** reference + let x = 22; + f(&x); +} + +fn main() { + bar(foo); +} +``` + +This program ought not to type-check: `foo` needs a static reference +for its argument, and `bar` wants to be given a function that that +accepts **any** reference (so it can call it with something on its +stack, for example). But *how* do we reject it and *why*? + +### Subtyping and Placeholders + +When we type-check `main`, and in particular the call `bar(foo)`, we +are going to wind up with a subtyping relationship like this one: + +```text +fn(&'static u32) <: for<'a> fn(&'a u32) +---------------- ------------------- +the type of `foo` the type `bar` expects +``` + +We handle this sort of subtyping by taking the variables that are +bound in the supertype and replacing them with +[universally quantified](../appendix/background.html#quantified) +representatives, written like `!1`. We call these regions "placeholder +regions" – they represent, basically, "some unknown region". + +Once we've done that replacement, we have the following relation: + +```text +fn(&'static u32) <: fn(&'!1 u32) +``` + +The key idea here is that this unknown region `'!1` is not related to +any other regions. So if we can prove that the subtyping relationship +is true for `'!1`, then it ought to be true for any region, which is +what we wanted. + +So let's work through what happens next. To check if two functions are +subtypes, we check if their arguments have the desired relationship +(fn arguments are [contravariant](../appendix/background.html#variance), so +we swap the left and right here): + +```text +&'!1 u32 <: &'static u32 +``` + +According to the basic subtyping rules for a reference, this will be +true if `'!1: 'static`. That is – if "some unknown region `!1`" lives +outlives `'static`. Now, this *might* be true – after all, `'!1` +could be `'static` – but we don't *know* that it's true. So this +should yield up an error (eventually). + +### What is a universe + +In the previous section, we introduced the idea of a placeholder +region, and we denoted it `!1`. We call this number `1` the **universe +index**. The idea of a "universe" is that it is a set of names that +are in scope within some type or at some point. Universes are formed +into a tree, where each child extends its parents with some new names. +So the **root universe** conceptually contains global names, such as +the the lifetime `'static` or the type `i32`. In the compiler, we also +put generic type parameters into this root universe (in this sense, +there is not just one root universe, but one per item). So consider +this function `bar`: + +```rust,ignore +struct Foo { } + +fn bar<'a, T>(t: &'a T) { + ... +} +``` + +Here, the root universe would consist of the lifetimes `'static` and +`'a`. In fact, although we're focused on lifetimes here, we can apply +the same concept to types, in which case the types `Foo` and `T` would +be in the root universe (along with other global types, like `i32`). +Basically, the root universe contains all the names that +[appear free](../appendix/background.html#free-vs-bound) in the body of `bar`. + +Now let's extend `bar` a bit by adding a variable `x`: + +```rust,ignore +fn bar<'a, T>(t: &'a T) { + let x: for<'b> fn(&'b u32) = ...; +} +``` + +Here, the name `'b` is not part of the root universe. 
Instead, when we +"enter" into this `for<'b>` (e.g., by replacing it with a placeholder), we will create +a child universe of the root, let's call it U1: + +```text +U0 (root universe) +│ +└─ U1 (child universe) +``` + +The idea is that this child universe U1 extends the root universe U0 +with a new name, which we are identifying by its universe number: +`!1`. + +Now let's extend `bar` a bit by adding one more variable, `y`: + +```rust,ignore +fn bar<'a, T>(t: &'a T) { + let x: for<'b> fn(&'b u32) = ...; + let y: for<'c> fn(&'b u32) = ...; +} +``` + +When we enter *this* type, we will again create a new universe, which +we'll call `U2`. Its parent will be the root universe, and U1 will be +its sibling: + +```text +U0 (root universe) +│ +├─ U1 (child universe) +│ +└─ U2 (child universe) +``` + +This implies that, while in U2, we can name things from U0 or U2, but +not U1. + +**Giving existential variables a universe.** Now that we have this +notion of universes, we can use it to extend our type-checker and +things to prevent illegal names from leaking out. The idea is that we +give each inference (existential) variable – whether it be a type or +a lifetime – a universe. That variable's value can then only +reference names visible from that universe. So for example is a +lifetime variable is created in U0, then it cannot be assigned a value +of `!1` or `!2`, because those names are not visible from the universe +U0. + +**Representing universes with just a counter.** You might be surprised +to see that the compiler doesn't keep track of a full tree of +universes. Instead, it just keeps a counter – and, to determine if +one universe can see another one, it just checks if the index is +greater. For example, U2 can see U0 because 2 >= 0. But U0 cannot see +U2, because 0 >= 2 is false. + +How can we get away with this? Doesn't this mean that we would allow +U2 to also see U1? The answer is that, yes, we would, **if that +question ever arose**. But because of the structure of our type +checker etc, there is no way for that to happen. In order for +something happening in the universe U1 to "communicate" with something +happening in U2, they would have to have a shared inference variable X +in common. And because everything in U1 is scoped to just U1 and its +children, that inference variable X would have to be in U0. And since +X is in U0, it cannot name anything from U1 (or U2). This is perhaps easiest +to see by using a kind of generic "logic" example: + +```text +exists { + forall { ... /* Y is in U1 ... */ } + forall { ... /* Z is in U2 ... */ } +} +``` + +Here, the only way for the two foralls to interact would be through X, +but neither Y nor Z are in scope when X is declared, so its value +cannot reference either of them. + +### Universes and placeholder region elements + +But where does that error come from? The way it happens is like this. +When we are constructing the region inference context, we can tell +from the type inference context how many placeholder variables exist +(the `InferCtxt` has an internal counter). For each of those, we +create a corresponding universal region variable `!n` and a "region +element" `placeholder(n)`. This corresponds to "some unknown set of other +elements". The value of `!n` is `{placeholder(n)}`. + +At the same time, we also give each existential variable a +**universe** (also taken from the `InferCtxt`). 
This universe +determines which placeholder elements may appear in its value: For +example, a variable in universe U3 may name `placeholder(1)`, `placeholder(2)`, and +`placeholder(3)`, but not `placeholder(4)`. Note that the universe of an inference +variable controls what region elements **can** appear in its value; it +does not say region elements **will** appear. + +### Placeholders and outlives constraints + +In the region inference engine, outlives constraints have the form: + +```text +V1: V2 @ P +``` + +where `V1` and `V2` are region indices, and hence map to some region +variable (which may be universally or existentially quantified). The +`P` here is a "point" in the control-flow graph; it's not important +for this section. This variable will have a universe, so let's call +those universes `U(V1)` and `U(V2)` respectively. (Actually, the only +one we are going to care about is `U(V1)`.) + +When we encounter this constraint, the ordinary procedure is to start +a DFS from `P`. We keep walking so long as the nodes we are walking +are present in `value(V2)` and we add those nodes to `value(V1)`. If +we reach a return point, we add in any `end(X)` elements. That part +remains unchanged. + +But then *after that* we want to iterate over the placeholder `placeholder(x)` +elements in V2 (each of those must be visible to `U(V2)`, but we +should be able to just assume that is true, we don't have to check +it). We have to ensure that `value(V1)` outlives each of those +placeholder elements. + +Now there are two ways that could happen. First, if `U(V1)` can see +the universe `x` (i.e., `x <= U(V1)`), then we can just add `placeholder(x)` +to `value(V1)` and be done. But if not, then we have to approximate: +we may not know what set of elements `placeholder(x)` represents, but we +should be able to compute some sort of **upper bound** B for it – +some region B that outlives `placeholder(x)`. For now, we'll just use +`'static` for that (since it outlives everything) – in the future, we +can sometimes be smarter here (and in fact we have code for doing this +already in other contexts). Moreover, since `'static` is in the root +universe U0, we know that all variables can see it – so basically if +we find that `value(V2)` contains `placeholder(x)` for some universe `x` +that `V1` can't see, then we force `V1` to `'static`. + +### Extending the "universal regions" check + +After all constraints have been propagated, the NLL region inference +has one final check, where it goes over the values that wound up being +computed for each universal region and checks that they did not get +'too large'. In our case, we will go through each placeholder region +and check that it contains *only* the `placeholder(u)` element it is known to +outlive. (Later, we might be able to know that there are relationships +between two placeholder regions and take those into account, as we do +for universal regions from the fn signature.) + +Put another way, the "universal regions" check can be considered to be +checking constraints like: + +```text +{placeholder(1)}: V1 +``` + +where `{placeholder(1)}` is like a constant set, and V1 is the variable we +made to represent the `!1` region. + +## Back to our example + +OK, so far so good. 
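+
+Before walking through the examples, here is a minimal sketch of the
+counter-based visibility check described above. It is ordinary Rust, not the
+compiler's actual definitions; the `UniverseIndex` type and `can_name` method
+below are illustrative stand-ins:
+
+```rust
+// Universes as a plain counter: universe `a` can name things from universe
+// `b` if and only if `a >= b`.
+#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
+struct UniverseIndex(u32);
+
+impl UniverseIndex {
+    const ROOT: UniverseIndex = UniverseIndex(0);
+
+    fn can_name(self, other: UniverseIndex) -> bool {
+        self.0 >= other.0
+    }
+}
+
+fn main() {
+    let u0 = UniverseIndex::ROOT;
+    let u2 = UniverseIndex(2);
+    assert!(u2.can_name(u0));  // U2 can see U0, because 2 >= 0
+    assert!(!u0.can_name(u2)); // but U0 cannot see U2, because 0 >= 2 is false
+}
+```
+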
Now let's walk through what would happen with our +first example: + +```text +fn(&'static u32) <: fn(&'!1 u32) @ P // this point P is not imp't here +``` + +The region inference engine will create a region element domain like this: + +```text +{ CFG; end('static); placeholder(1) } + --- ------------ ------- from the universe `!1` + | 'static is always in scope + all points in the CFG; not especially relevant here +``` + +It will always create two universal variables, one representing +`'static` and one representing `'!1`. Let's call them Vs and V1. They +will have initial values like so: + +```text +Vs = { CFG; end('static) } // it is in U0, so can't name anything else +V1 = { placeholder(1) } +``` + +From the subtyping constraint above, we would have an outlives constraint like + +```text +'!1: 'static @ P +``` + +To process this, we would grow the value of V1 to include all of Vs: + +```text +Vs = { CFG; end('static) } +V1 = { CFG; end('static), placeholder(1) } +``` + +At that point, constraint propagation is complete, because all the +outlives relationships are satisfied. Then we would go to the "check +universal regions" portion of the code, which would test that no +universal region grew too large. + +In this case, `V1` *did* grow too large – it is not known to outlive +`end('static)`, nor any of the CFG – so we would report an error. + +## Another example + +What about this subtyping relationship? + +```text +for<'a> fn(&'a u32, &'a u32) + <: +for<'b, 'c> fn(&'b u32, &'c u32) +``` + +Here we would replace the bound region in the supertype with a placeholder, as before, yielding: + +```text +for<'a> fn(&'a u32, &'a u32) + <: +fn(&'!1 u32, &'!2 u32) +``` + +then we instantiate the variable on the left-hand side with an +existential in universe U2, yielding the following (`?n` is a notation +for an existential variable): + +```text +fn(&'?3 u32, &'?3 u32) + <: +fn(&'!1 u32, &'!2 u32) +``` + +Then we break this down further: + +```text +&'!1 u32 <: &'?3 u32 +&'!2 u32 <: &'?3 u32 +``` + +and even further, yield up our region constraints: + +```text +'!1: '?3 +'!2: '?3 +``` + +Note that, in this case, both `'!1` and `'!2` have to outlive the +variable `'?3`, but the variable `'?3` is not forced to outlive +anything else. Therefore, it simply starts and ends as the empty set +of elements, and hence the type-check succeeds here. + +(This should surprise you a little. It surprised me when I first realized it. +We are saying that if we are a fn that **needs both of its arguments to have +the same region**, we can accept being called with **arguments with two +distinct regions**. That seems intuitively unsound. But in fact, it's fine, as +I tried to explain in [this issue][ohdeargoditsallbroken] on the Rust issue +tracker long ago. The reason is that even if we get called with arguments of +two distinct lifetimes, those two lifetimes have some intersection (the call +itself), and that intersection can be our value of `'a` that we use as the +common lifetime of our arguments. -nmatsakis) + +[ohdeargoditsallbroken]: https://github.com/rust-lang/rust/issues/32330#issuecomment-202536977 + +## Final example + +Let's look at one last example. We'll extend the previous one to have +a return type: + +```text +for<'a> fn(&'a u32, &'a u32) -> &'a u32 + <: +for<'b, 'c> fn(&'b u32, &'c u32) -> &'b u32 +``` + +Despite seeming very similar to the previous example, this case is going to get +an error. 
That's good: the problem is that we've gone from a fn that promises +to return one of its two arguments, to a fn that is promising to return the +first one. That is unsound. Let's see how it plays out. + +First, we replace the bound region in the supertype with a placeholder: + +```text +for<'a> fn(&'a u32, &'a u32) -> &'a u32 + <: +fn(&'!1 u32, &'!2 u32) -> &'!1 u32 +``` + +Then we instantiate the subtype with existentials (in U2): + +```text +fn(&'?3 u32, &'?3 u32) -> &'?3 u32 + <: +fn(&'!1 u32, &'!2 u32) -> &'!1 u32 +``` + +And now we create the subtyping relationships: + +```text +&'!1 u32 <: &'?3 u32 // arg 1 +&'!2 u32 <: &'?3 u32 // arg 2 +&'?3 u32 <: &'!1 u32 // return type +``` + +And finally the outlives relationships. Here, let V1, V2, and V3 be the +variables we assign to `!1`, `!2`, and `?3` respectively: + +```text +V1: V3 +V2: V3 +V3: V1 +``` + +Those variables will have these initial values: + +```text +V1 in U1 = {placeholder(1)} +V2 in U2 = {placeholder(2)} +V3 in U2 = {} +``` + +Now because of the `V3: V1` constraint, we have to add `placeholder(1)` into `V3` (and +indeed it is visible from `V3`), so we get: + +```text +V3 in U2 = {placeholder(1)} +``` + +then we have this constraint `V2: V3`, so we wind up having to enlarge +`V2` to include `placeholder(1)` (which it can also see): + +```text +V2 in U2 = {placeholder(1), placeholder(2)} +``` + +Now constraint propagation is done, but when we check the outlives +relationships, we find that `V2` includes this new element `placeholder(1)`, +so we report an error. + diff --git a/src/doc/rustc-guide/src/borrow_check/type_check.md b/src/doc/rustc-guide/src/borrow_check/type_check.md new file mode 100644 index 0000000000..ee955d9717 --- /dev/null +++ b/src/doc/rustc-guide/src/borrow_check/type_check.md @@ -0,0 +1,10 @@ +# The MIR type-check + +A key component of the borrow check is the +[MIR type-check](https://doc.rust-lang.org/nightly/nightly-rustc/rustc_mir/borrow_check/nll/type_check/index.html). +This check walks the MIR and does a complete "type check" -- the same +kind you might find in any other language. In the process of doing +this type-check, we also uncover the region constraints that apply to +the program. + +TODO -- elaborate further? Maybe? :) diff --git a/src/doc/rustc-guide/src/build-install-distribution-artifacts.md b/src/doc/rustc-guide/src/build-install-distribution-artifacts.md new file mode 100644 index 0000000000..7430ffb9b8 --- /dev/null +++ b/src/doc/rustc-guide/src/build-install-distribution-artifacts.md @@ -0,0 +1,29 @@ +# Build distribution artifacts + +You might want to build and package up the compiler for distribution. +You’ll want to run this command to do it: + + ```bash + ./x.py dist + ``` + +# Install distribution artifacts + +If you’ve built a distribution artifact you might want to install it and +test that it works on your target system. You’ll want to run this command: + + ```bash + ./x.py install + ``` + + Note: If you are testing out a modification to a compiler, you + might want to use it to compile some project. + Usually, you do not want to use ./x.py install for testing. + Rather, you should create a toolchain as discussed in + [here][create-rustup-toolchain]. + + For example, if the toolchain you created is called foo, you + would then invoke it with `rustc +foo ...` (where ... represents + the rest of the arguments). 
+ +[create-rustup-toolchain]: ./how-to-build-and-run.md#creating-a-rustup-toolchain \ No newline at end of file diff --git a/src/doc/rustc-guide/src/codegen.md b/src/doc/rustc-guide/src/codegen.md new file mode 100644 index 0000000000..766dfff59d --- /dev/null +++ b/src/doc/rustc-guide/src/codegen.md @@ -0,0 +1,54 @@ +# Code generation + +Code generation or "codegen" is the part of the compiler that actually +generates an executable binary. rustc uses LLVM for code generation. + +> NOTE: If you are looking for hints on how to debug code generation bugs, +> please see [this section of the debugging chapter][debug]. + +[debug]: compiler-debugging.html#debugging-llvm + +## What is LLVM? + +All of the preceding chapters of this guide have one thing in common: we never +generated any executable machine code at all! With this chapter, all of that +changes. + +Like most compilers, rustc is composed of a "frontend" and a "backend". The +"frontend" is responsible for taking raw source code, checking it for +correctness, and getting it into a format `X` from which we can generate +executable machine code. The "backend" then takes that format `X` and produces +(possibly optimized) executable machine code for some platform. All of the +previous chapters deal with rustc's frontend. + +rustc's backend is [LLVM](https://llvm.org), "a collection of modular and +reusable compiler and toolchain technologies". In particular, the LLVM project +contains a pluggable compiler backend (also called "LLVM"), which is used by +many compiler projects, including the `clang` C compiler and our beloved +`rustc`. + +LLVM's "format `X`" is called LLVM IR. It is basically assembly code with +additional low-level types and annotations added. These annotations are helpful +for doing optimizations on the LLVM IR and outputted machine code. The end +result of all this is (at long last) something executable (e.g. an ELF object +or wasm). + +There are a few benefits to using LLVM: + +- We don't have to write a whole compiler backend. This reduces implementation + and maintenance burden. +- We benefit from the large suite of advanced optimizations that the LLVM + project has been collecting. +- We automatically can compile Rust to any of the platforms for which LLVM has + support. For example, as soon as LLVM added support for wasm, voila! rustc, + clang, and a bunch of other languages were able to compile to wasm! (Well, + there was some extra stuff to be done, but we were 90% there anyway). +- We and other compiler projects benefit from each other. For example, when the + [Spectre and Meltdown security vulnerabilities][spectre] were discovered, + only LLVM needed to be patched. + +[spectre]: https://meltdownattack.com/ + +## Generating LLVM IR + +TODO diff --git a/src/doc/rustc-guide/src/compiler-debugging.md b/src/doc/rustc-guide/src/compiler-debugging.md new file mode 100644 index 0000000000..aca7f7424b --- /dev/null +++ b/src/doc/rustc-guide/src/compiler-debugging.md @@ -0,0 +1,390 @@ +**Note: This is copied from the +[rust-forge](https://github.com/rust-lang-nursery/rust-forge). If anything needs + updating, please open an issue or make a PR on the github repo.** + +# Debugging the compiler +[debugging]: #debugging + +Here are a few tips to debug the compiler: + +## Getting a backtrace +[getting-a-backtrace]: #getting-a-backtrace + +When you have an ICE (panic in the compiler), you can set +`RUST_BACKTRACE=1` to get the stack trace of the `panic!` like in +normal Rust programs. 
IIRC backtraces **don't work** on Mac and on MinGW, +sorry. If you have trouble or the backtraces are full of `unknown`, +you might want to find some way to use Linux or MSVC on Windows. + +In the default configuration, you don't have line numbers enabled, so the +backtrace looks like this: + +```text +stack backtrace: + 0: std::sys::imp::backtrace::tracing::imp::unwind_backtrace + 1: std::sys_common::backtrace::_print + 2: std::panicking::default_hook::{{closure}} + 3: std::panicking::default_hook + 4: std::panicking::rust_panic_with_hook + 5: std::panicking::begin_panic + (~~~~ LINES REMOVED BY ME FOR BREVITY ~~~~) + 32: rustc_typeck::check_crate + 33: >::with + 34: >::with + 35: rustc::ty::context::TyCtxt::create_and_enter + 36: rustc_driver::driver::compile_input + 37: rustc_driver::run_compiler +``` + +If you want line numbers for the stack trace, you can enable +`debuginfo-lines=true` or `debuginfo=true` in your config.toml and rebuild the +compiler. Then the backtrace will look like this: + +```text +stack backtrace: + (~~~~ LINES REMOVED BY ME FOR BREVITY ~~~~) + at /home/user/rust/src/librustc_typeck/check/cast.rs:110 + 7: rustc_typeck::check::cast::CastCheck::check + at /home/user/rust/src/librustc_typeck/check/cast.rs:572 + at /home/user/rust/src/librustc_typeck/check/cast.rs:460 + at /home/user/rust/src/librustc_typeck/check/cast.rs:370 + (~~~~ LINES REMOVED BY ME FOR BREVITY ~~~~) + 33: rustc_driver::driver::compile_input + at /home/user/rust/src/librustc_driver/driver.rs:1010 + at /home/user/rust/src/librustc_driver/driver.rs:212 + 34: rustc_driver::run_compiler + at /home/user/rust/src/librustc_driver/lib.rs:253 +``` + +## Getting a backtrace for errors +[getting-a-backtrace-for-errors]: #getting-a-backtrace-for-errors + +If you want to get a backtrace to the point where the compiler emits +an error message, you can pass the `-Z treat-err-as-bug`, which +will make the compiler panic on the first error it sees. + +This can also help when debugging `delay_span_bug` calls - it will make +the first `delay_span_bug` call panic, which will give you a useful backtrace. + +For example: + +```bash +$ cat error.rs +fn main() { + 1 + (); +} +``` + +```bash +$ ./build/x86_64-unknown-linux-gnu/stage1/bin/rustc error.rs +error[E0277]: the trait bound `{integer}: std::ops::Add<()>` is not satisfied + --> error.rs:2:7 + | +2 | 1 + (); + | ^ no implementation for `{integer} + ()` + | + = help: the trait `std::ops::Add<()>` is not implemented for `{integer}` + +error: aborting due to previous error + +$ # Now, where does the error above come from? +$ RUST_BACKTRACE=1 \ + ./build/x86_64-unknown-linux-gnu/stage1/bin/rustc \ + error.rs \ + -Z treat-err-as-bug +error[E0277]: the trait bound `{integer}: std::ops::Add<()>` is not satisfied + --> error.rs:2:7 + | +2 | 1 + (); + | ^ no implementation for `{integer} + ()` + | + = help: the trait `std::ops::Add<()>` is not implemented for `{integer}` + +error: internal compiler error: unexpected panic + +note: the compiler unexpectedly panicked. this is a bug. + +note: we would appreciate a bug report: https://github.com/rust-lang/rust/blob/master/CONTRIBUTING.md#bug-reports + +note: rustc 1.24.0-dev running on x86_64-unknown-linux-gnu + +note: run with `RUST_BACKTRACE=1` for a backtrace + +thread 'rustc' panicked at 'encountered error with `-Z treat_err_as_bug', +/home/user/rust/src/librustc_errors/lib.rs:411:12 +note: Some details are omitted, run with `RUST_BACKTRACE=full` for a verbose +backtrace. 
+stack backtrace: + (~~~ IRRELEVANT PART OF BACKTRACE REMOVED BY ME ~~~) + 7: rustc::traits::error_reporting::>::report_selection_error + at /home/user/rust/src/librustc/traits/error_reporting.rs:823 + 8: rustc::traits::error_reporting::>::report_fulfillment_errors + at /home/user/rust/src/librustc/traits/error_reporting.rs:160 + at /home/user/rust/src/librustc/traits/error_reporting.rs:112 + 9: rustc_typeck::check::FnCtxt::select_obligations_where_possible + at /home/user/rust/src/librustc_typeck/check/mod.rs:2192 + (~~~ IRRELEVANT PART OF BACKTRACE REMOVED BY ME ~~~) + 36: rustc_driver::run_compiler + at /home/user/rust/src/librustc_driver/lib.rs:253 +$ # Cool, now I have a backtrace for the error +``` + +## Getting logging output +[getting-logging-output]: #getting-logging-output + +The compiler has a lot of `debug!` calls, which print out logging information +at many points. These are very useful to at least narrow down the location of +a bug if not to find it entirely, or just to orient yourself as to why the +compiler is doing a particular thing. + +To see the logs, you need to set the `RUST_LOG` environment variable to +your log filter, e.g. to get the logs for a specific module, you can run the +compiler as `RUST_LOG=module::path rustc my-file.rs`. The Rust logs are +powered by [env-logger], and you can look at the docs linked there to see +the full `RUST_LOG` syntax. All `debug!` output will then appear in +standard error. + +Note that unless you use a very strict filter, the logger will emit a *lot* +of output - so it's typically a good idea to pipe standard error to a file +and look at the log output with a text editor. + +So to put it together. + +```bash +# This puts the output of all debug calls in `librustc/traits` into +# standard error, which might fill your console backscroll. +$ RUST_LOG=rustc::traits rustc +local my-file.rs + +# This puts the output of all debug calls in `librustc/traits` in +# `traits-log`, so you can then see it with a text editor. +$ RUST_LOG=rustc::traits rustc +local my-file.rs 2>traits-log + +# Not recommended. This will show the output of all `debug!` calls +# in the Rust compiler, and there are a *lot* of them, so it will be +# hard to find anything. +$ RUST_LOG=debug rustc +local my-file.rs 2>all-log + +# This will show the output of all `info!` calls in `rustc_trans`. +# +# There's an `info!` statement in `trans_instance` that outputs +# every function that is translated. This is useful to find out +# which function triggers an LLVM assertion, and this is an `info!` +# log rather than a `debug!` log so it will work on the official +# compilers. +$ RUST_LOG=rustc_trans=info rustc +local my-file.rs +``` + +While calls to `info!` are included in every build of the compiler, +calls to `debug!` are only included in the program if the +`debug-assertions=yes` is turned on in config.toml (it is +turned off by default), so if you don't see `DEBUG` logs, especially +if you run the compiler with `RUST_LOG=rustc rustc some.rs` and only see +`INFO` logs, make sure that `debug-assertions=yes` is turned on in your +config.toml. + +I also think that in some cases just setting it will not trigger a rebuild, +so if you changed it and you already have a compiler built, you might +want to call `x.py clean` to force one. 
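+
+If it helps to see the source side of this, here is a minimal sketch of how
+the module-path filtering works in any program built on the `log` and
+`env_logger` crates, which is the same mechanism rustc's logging uses. The
+crate layout and names below are made up: assume a binary crate called
+`mycrate` with `log` and `env_logger` as dependencies.
+
+```rust,ignore
+#[macro_use]
+extern crate log;
+extern crate env_logger;
+
+mod traits {
+    pub fn select() {
+        // Printed on standard error only when the filter covers this module
+        // path, e.g. `RUST_LOG=mycrate::traits` or `RUST_LOG=debug`.
+        debug!("selecting an obligation");
+        // Also printed with `RUST_LOG=mycrate=info`, since `info!` is a less
+        // verbose level than `debug!`.
+        info!("selection finished");
+    }
+}
+
+fn main() {
+    env_logger::init();
+    traits::select();
+}
+```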
+ +### Logging etiquette + +Because calls to `debug!` are removed by default, in most cases, don't worry +about adding "unnecessary" calls to `debug!` and leaving them in code you +commit - they won't slow down the performance of what we ship, and if they +helped you pinning down a bug, they will probably help someone else with a +different one. + +However, there are still a few concerns that you might care about: + +### Expensive operations in logs + +A note of caution: the expressions *within* the `debug!` call are run +whenever RUST_LOG is set, even if the filter would exclude the log. This means +that if in the module `rustc::foo` you have a statement + +```Rust +debug!("{:?}", random_operation(tcx)); +``` + +Then if someone runs a debug `rustc` with `RUST_LOG=rustc::bar`, then +`random_operation()` will still run - even while it's output will never be +needed! + +This means that you should not put anything too expensive or likely +to crash there - that would annoy anyone who wants to use logging for their own +module. Note that if `RUST_LOG` is unset (the default), then the code will not +run - this means that if your logging code panics, then no-one will know it +until someone tries to use logging to find *another* bug. + +If you *need* to do an expensive operation in a log, be aware that while log +expressions are *evaluated* even if logging is not enabled in your module, +they are not *formatted* unless it *is*. This means you can put your +expensive/crashy operations inside an `fmt::Debug` impl, and they will not be +run unless your log is enabled: + +```Rust +use std::fmt; + +struct ExpensiveOperationContainer<'a, 'gcx, 'tcx> + where 'tcx: 'gcx, 'a: 'tcx +{ + tcx: TyCtxt<'a, 'gcx, 'tcx> +} + +impl<'a, 'gcx, 'tcx> fmt::Debug for ExpensiveOperationContainer<'a, 'gcx, 'tcx> { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + let value = random_operation(tcx); + fmt::Debug::fmt(&value, fmt) + } +} + +debug!("{:?}", ExpensiveOperationContainer { tcx }); +``` + +## Formatting Graphviz output (.dot files) +[formatting-graphviz-output]: #formatting-graphviz-output + +Some compiler options for debugging specific features yield graphviz graphs - +e.g. the `#[rustc_mir(borrowck_graphviz_postflow="suffix.dot")]` attribute +dumps various borrow-checker dataflow graphs. + +These all produce `.dot` files. To view these files, install graphviz (e.g. +`apt-get install graphviz`) and then run the following commands: + +```bash +$ dot -T pdf maybe_init_suffix.dot > maybe_init_suffix.pdf +$ firefox maybe_init_suffix.pdf # Or your favorite pdf viewer +``` + +## Debugging LLVM +[debugging-llvm]: #debugging-llvm + +> NOTE: If you are looking for info about code generation, please see [this +> chapter][codegen] instead. + +[codegen]: codegen.html + +This section is about debugging compiler bugs in code generation (e.g. why the +compiler generated some piece of code or crashed in LLVM). LLVM is a big +project on its own that probably needs to have its own debugging document (not +that I could find one). But here are some tips that are important in a rustc +context: + +As a general rule, compilers generate lots of information from analyzing code. +Thus, a useful first step is usually to find a minimal example. One way to do +this is to + +1. create a new crate that reproduces the issue (e.g. adding whatever crate is +at fault as a dependency, and using it from there) + +2. minimize the crate by removing external dependencies; that is, moving +everything relevant to the new crate + +3. 
further minimize the issue by making the code shorter (there are tools that
+help with this like `creduce`)
+
+The official compilers (including nightlies) have LLVM assertions disabled,
+which means that LLVM assertion failures can show up as compiler crashes (not
+ICEs but "real" crashes) and other sorts of weird behavior. If you are
+encountering these, it is a good idea to try using a compiler with LLVM
+assertions enabled - either an "alt" nightly or a compiler you build yourself
+by setting `[llvm] assertions=true` in your config.toml - and see whether
+anything turns up.
+
+The rustc build process builds the LLVM tools into
+`./build/<host-triple>/llvm/bin`. They can be called directly.
+
+The default rustc compilation pipeline has multiple codegen units, which is
+hard to replicate manually and means that LLVM is called multiple times in
+parallel. If you can get away with it (i.e. if it doesn't make your bug
+disappear), passing `-C codegen-units=1` to rustc will make debugging easier.
+
+To get rustc to generate LLVM IR, you need to pass the `--emit=llvm-ir` flag. If
+you are building via cargo, use the `RUSTFLAGS` environment variable (e.g.
+`RUSTFLAGS='--emit=llvm-ir'`). This causes rustc to spit out LLVM IR into the
+target directory.
+
+`cargo llvm-ir [options] path` spits out the LLVM IR for a particular function
+at `path`. (`cargo install cargo-asm` installs `cargo asm` and `cargo
+llvm-ir`). `--build-type=debug` emits code for debug builds. There are also
+other useful options. Also, debug info in LLVM IR can clutter the output a lot:
+`RUSTFLAGS="-C debuginfo=0"` is really useful.
+
+`RUSTFLAGS="-C save-temps"` outputs LLVM bitcode (not the same as IR) at
+different stages during compilation, which is sometimes useful. One just needs
+to convert the bitcode files to `.ll` files using `llvm-dis` which should be in
+the target local compilation of rustc.
+
+If you want to play with the optimization pipeline, you can use the `opt` tool
+from `./build/<host-triple>/llvm/bin/` with the LLVM IR emitted by rustc. Note
+that rustc emits different IR depending on whether `-O` is enabled, even
+without LLVM's optimizations, so if you want to play with the IR rustc emits,
+you should:
+
+```bash
+$ rustc +local my-file.rs --emit=llvm-ir -O -C no-prepopulate-passes \
+    -C codegen-units=1
+$ OPT=./build/$TRIPLE/llvm/bin/opt
+$ $OPT -S -O2 < my-file.ll > my
+```
+
+If you just want to get the LLVM IR during the LLVM pipeline, to e.g. see which
+IR causes an optimization-time assertion to fail, or to see when LLVM performs
+a particular optimization, you can pass the rustc flag `-C
+llvm-args=-print-after-all`, and possibly add `-C
+llvm-args='-filter-print-funcs=EXACT_FUNCTION_NAME'` (e.g. `-C
+llvm-args='-filter-print-funcs=_ZN11collections3str21_$LT$impl$u20$str$GT$\
+7replace17hbe10ea2e7c809b0bE'`).
+
+That produces a lot of output into standard error, so you'll want to pipe that
+to some file. Also, if you are using neither `-filter-print-funcs` nor `-C
+codegen-units=1`, then, because the multiple codegen units run in parallel, the
+printouts will mix together and you won't be able to read anything.
+
+If you want just the IR for a specific function (say, you want to see why it
+causes an assertion or doesn't optimize correctly), you can use `llvm-extract`,
+e.g.
+ +```bash +$ ./build/$TRIPLE/llvm/bin/llvm-extract \ + -func='_ZN11collections3str21_$LT$impl$u20$str$GT$7replace17hbe10ea2e7c809b0bE' \ + -S \ + < unextracted.ll \ + > extracted.ll +``` + +### Filing LLVM bug reports + +When filing an LLVM bug report, you will probably want some sort of minimal +working example that demonstrates the problem. The Godbolt compiler explorer is +really helpful for this. + +1. Once you have some LLVM IR for the problematic code (see above), you can +create a minimal working example with Godbolt. Go to +[gcc.godbolt.org](https://gcc.godbolt.org). + +2. Choose `LLVM-IR` as programming language. + +3. Use `llc` to compile the IR to a particular target as is: + - There are some useful flags: `-mattr` enables target features, `-march=` + selects the target, `-mcpu=` selects the CPU, etc. + - Commands like `llc -march=help` output all architectures available, which + is useful because sometimes the Rust arch names and the LLVM names do not + match. + - If you have compiled rustc yourself somewhere, in the target directory + you have binaries for `llc`, `opt`, etc. + +4. If you want to optimize the LLVM-IR, you can use `opt` to see how the LLVM + optimizations transform it. + +5. Once you have a godbolt link demonstrating the issue, it is pretty easy to + fill in an LLVM bug. + + +[env-logger]: https://docs.rs/env_logger/0.4.3/env_logger/ + +## Narrowing (Bisecting) Regressions + +The [cargo-bisect-rustc](https://github.com/rust-lang-nursery/cargo-bisect-rustc) tool can be used as a quick and easy way to find exactly which PR caused a change in `rustc` behavior. It automatically downloads `rustc` PR artifacts and tests them against a project you provide until it finds the regression. You can then look at the PR to get more context on *why* it was changed. See [this tutorial](https://github.com/rust-lang-nursery/cargo-bisect-rustc/blob/master/TUTORIAL.md) on how to use it. diff --git a/src/doc/rustc-guide/src/compiler-documenting.md b/src/doc/rustc-guide/src/compiler-documenting.md new file mode 100644 index 0000000000..97636fad5b --- /dev/null +++ b/src/doc/rustc-guide/src/compiler-documenting.md @@ -0,0 +1,62 @@ +# Documenting rustc + +You might want to build documentation of the various components +available like the standard library. There’s two ways to go about this. + You can run rustdoc directly on the file to make sure the HTML is + correct, which is fast. Alternatively, you can build the documentation + as part of the build process through x.py. Both are viable methods + since documentation is more about the content. + +## Document everything + + ```bash + ./x.py doc + ``` + +## If you want to avoid the whole Stage 2 build + +```bash +./x.py doc --stage 1 +``` + +First the compiler and rustdoc get built to make sure everything is okay +and then it documents the files. + +## Document specific components + +```bash + ./x.py doc src/doc/book + ./x.py doc src/doc/nomicon + ./x.py doc src/doc/book src/libstd +``` + +Much like individual tests or building certain components you can build only + the documentation you want. + +## Document internal rustc items + +Compiler documentation is not built by default. There's a flag in +config.toml for achieving the same. +But, when enabled, compiler documentation does include internal items. 
+ +Next open up config.toml and make sure these two lines are set to true: + +```bash +docs = true +compiler-docs = true +``` + +When you want to build the compiler docs as well run this command: + +```bash +./x.py doc +``` + +This will see that the docs and compiler-docs options are set to true +and build the normally hidden compiler docs! + +### Compiler Documentation + +The documentation for the rust components are found at [rustc doc]. + +[rustc doc]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc/ \ No newline at end of file diff --git a/src/doc/rustc-guide/src/compiler-team.md b/src/doc/rustc-guide/src/compiler-team.md new file mode 100644 index 0000000000..a6328198ca --- /dev/null +++ b/src/doc/rustc-guide/src/compiler-team.md @@ -0,0 +1,119 @@ +# About the compiler team + +rustc is maintained by the +[Rust compiler team](https://www.rust-lang.org/en-US/team.html). The +people who belong to this team collectively work to track regressions +and implement new features. Members of the Rust compiler team are +people who have made significant contributions to rustc and its +design. + +## Discussion + +Currently the compiler team chats in a number of places. There is an +ongoing [thread] on the internals board about trying to find a permanent +home. In any case, you can find people in one of three places at the moment: + +- The `#rustc` channel on mozilla's IRC (`irc.mozilla.org`) +- The `t-compiler` stream on [the Zulip instance](https://rust-lang.zulipchat.com/#narrow/stream/131828-t-compiler) +- The `compiler` channel on the [rust-lang discord](https://discord.gg/rust-lang) + +## Rust compiler meeting + +The compiler team has a weekly meeting where we do triage and try to +generally stay on top of new bugs, regressions, and other things. This +general plan for this meeting can be found in +[the rust-compiler-meeting etherpad][etherpad]. It works roughly as +follows: + +- **Review P-high bugs:** P-high bugs are those that are sufficiently + important for us to actively track progress. P-high bugs should + ideally always have an assignee. +- **Look over new regressions:** we then look for new cases where the + compiler broke previously working code in the wild. Regressions are + almost always marked as P-high; the major exception would be bug + fixes (though even there we often + [aim to give warnings first][procedure]). +- **Check I-nominated issues:** These are issues where feedback from + the team is desired. +- **Check for beta nominations:** These are nominations of things to + backport to beta. + +The meeting currently takes place on Thursdays at 10am Boston time +(UTC-4 typically, but daylight savings time sometimes makes things +complicated). + +The meeting is held over a "chat medium" — it used to be IRC, but we +are currently in the process of evaluating other alternatives. Check +the [etherpad] to find the current home (and see +[this internals thread][thread] for some ongoing discussion). + +[etherpad]: https://public.etherpad-mozilla.org/p/rust-compiler-meeting +[thread]: https://internals.rust-lang.org/t/where-should-the-compiler-team-and-perhaps-working-groups-chat/7894 +[procedure]: https://forge.rust-lang.org/rustc-bug-fix-procedure.html + +## Team membership + +Membership in the Rust team is typically offered when someone has been +making significant contributions to the compiler for some +time. Membership is both a recognition but also an obligation: +compiler team members are generally expected to help with upkeep as +well as doing reviews and other work. 
+ +If you are interested in becoming a compiler team member, the first +thing to do is to start fixing some bugs, or get involved in a working +group. One good way to find bugs is to look for +[open issues tagged with E-easy](https://github.com/rust-lang/rust/issues?q=is%3Aopen+is%3Aissue+label%3AE-easy) +or +[E-mentor](https://github.com/rust-lang/rust/issues?q=is%3Aopen+is%3Aissue+label%3AE-mentor). + +### r+ rights + +Once you have made a number of individual PRs to rustc, we will often +offer r+ privileges. This means that you have the right to instruct +"bors" (the robot that manages which PRs get landed into rustc) to +merge a PR +([here are some instructions for how to talk to bors][homu-guide]). + +[homu-guide]: https://buildbot2.rust-lang.org/homu/ + +The guidelines for reviewers are as follows: + +- You are always welcome to review any PR, regardless of who it is + assigned to. However, do not r+ PRs unless: + - You are confident in that part of the code. + - You are confident that nobody else wants to review it first. + - For example, sometimes people will express a desire to review a + PR before it lands, perhaps because it touches a particularly + sensitive part of the code. +- Always be polite when reviewing: you are a representative of the + Rust project, so it is expected that you will go above and beyond + when it comes to the [Code of Conduct]. + +[Code of Conduct]: https://www.rust-lang.org/en-US/conduct.html + +### high-five + +Once you have r+ rights, you can also be added to the high-five +rotation. high-five is the bot that assigns incoming PRs to +reviewers. If you are added, you will be randomly selected to review +PRs. If you find you are assigned a PR that you don't feel comfortable +reviewing, you can also leave a comment like `r? @so-and-so` to assign +to someone else — if you don't know who to request, just write `r? +@nikomatsakis for reassignment` and @nikomatsakis will pick someone +for you. + +[hi5]: https://github.com/rust-highfive + +Getting on the high-five list is much appreciated as it lowers the +review burden for all of us! However, if you don't have time to give +people timely feedback on their PRs, it may be better that you don't +get on the list. + +### Full team membership + +Full team membership is typically extended once someone made many +contributions to the Rust compiler over time, ideally (but not +necessarily) to multiple areas. Sometimes this might be implementing a +new feature, but it is also important — perhaps more important! — to +have time and willingness to help out with general upkeep such as +bugfixes, tracking regressions, and other less glamorous work. diff --git a/src/doc/rustc-guide/src/compiletest.md b/src/doc/rustc-guide/src/compiletest.md new file mode 100644 index 0000000000..3cfb943b07 --- /dev/null +++ b/src/doc/rustc-guide/src/compiletest.md @@ -0,0 +1,226 @@ +# `compiletest` + +## Introduction + +`compiletest` is the main test harness of the Rust test suite. It allows +test authors to organize large numbers of tests (the Rust compiler has many +thousands), efficient test execution (parallel execution is supported), and +allows the test author to configure behavior and expected results of both +individual and groups of tests. + +`compiletest` tests may check test code for success, for failure or in some +cases, even failure to compile. 
Tests are typically organized as a Rust source
+file with annotations in comments before and/or within the test code, which
+serve to direct `compiletest` on if or how to run the test, what behavior to
+expect, and more. If you are unfamiliar with the compiler testing framework,
+see [this chapter](./tests/intro.html) for additional background.
+
+The tests themselves are typically (but not always) organized into
+"suites" – for example, `run-pass`, a folder representing tests that should
+succeed, `run-fail`, a folder holding tests that should compile successfully,
+but return a failure (non-zero status), `compile-fail`, a folder holding tests
+that should fail to compile, and many more. The various suites are defined in
+[src/tools/compiletest/src/common.rs][common] in the `pub struct Config`
+declaration. And a very good introduction to the different suites of compiler
+tests along with details about them can be found in [Adding new
+tests](./tests/adding.html).
+
+## Adding a new test file
+
+Briefly, simply create your new test in the appropriate location under
+[src/test][test]. No registration of test files is necessary as `compiletest`
+will scan the [src/test][test] subfolder recursively, and will execute any Rust
+source files it finds as tests. See [`Adding new tests`](./tests/adding.html)
+for a complete guide on how to add new tests.
+
+## Header Commands
+
+Source file annotations which appear in comments near the top of the source
+file *before* any test code are known as header commands. These commands can
+instruct `compiletest` to ignore this test, set expectations on whether it is
+expected to succeed at compiling, or what the test's return code is expected to
+be. Header commands (and their inline counterparts, Error Info commands) are
+described more fully
+[here](./tests/adding.html#header-commands-configuring-rustc).
+
+### Adding a new header command
+
+Header commands are defined in the `TestProps` struct in
+[src/tools/compiletest/src/header.rs][header]. At a high level, there are
+dozens of test properties defined here, all set to default values in the
+`TestProps` struct's `impl` block. Any test can override this default value by
+specifying the property in question as header command as a comment (`//`) in
+the test source file, before any source code.
+
+#### Using a header command
+
+Here is an example, specifying the `must-compile-successfully` header command,
+which takes no arguments, followed by the `failure-status` header command,
+which takes a single argument (which, in this case is a value of 1).
+`failure-status` is instructing `compiletest` to expect a failure status of 1
+(rather than the current Rust default of 101 at the time of this writing). The
+header command and the argument list (if present) are typically separated by a
+colon:
+
+```rust,ignore
+// must-compile-successfully
+// failure-status: 1
+
+#![feature(termination_trait)]
+
+use std::io::{Error, ErrorKind};
+
+fn main() -> Result<(), Box<Error>> {
+    Err(Box::new(Error::new(ErrorKind::Other, "returned Box from main()")))
+}
+```
+
+#### Adding a new header command property
+
+One would add a new header command if there is a need to define some test
+property or behavior on an individual, test-by-test basis. A header command
+property serves as the header command's backing store (holds the command's
+current value) at runtime.
+
+To add a new header command property:
+  1. Look for the `pub struct TestProps` declaration in
+     [src/tools/compiletest/src/header.rs][header] and add the new public
+     property to the end of the declaration.
+  2. Look for the `impl TestProps` implementation block immediately following
+     the struct declaration and initialize the new property to its default
+     value.
+
+#### Adding a new header command parser
+
+When `compiletest` encounters a test file, it parses the file a line at a time
+by calling every parser defined in the `Config` struct's implementation block,
+also in [src/tools/compiletest/src/header.rs][header] (note the `Config`
+struct's declaration block is found in
+[src/tools/compiletest/src/common.rs][common]). `TestProps`'s `load_from()`
+method will try passing the current line of text to each parser, which, in turn
+typically checks to see if the line begins with a particular commented (`//`)
+header command such as `// must-compile-successfully` or `// failure-status`.
+Whitespace after the comment marker is optional.
+
+Parsers will override a given header command property's default value merely by
+being specified in the test file as a header command or by having a parameter
+value specified in the test file, depending on the header command.
+
+Parsers defined in `impl Config` are typically named `parse_<header_command>`
+(note kebab-case `<header-command>` transformed to snake-case
+`<header_command>`). `impl Config` also defines several 'low-level' parsers
+which make it simple to parse common patterns like simple presence or not
+(`parse_name_directive()`), header-command:parameter(s)
+(`parse_name_value_directive()`), optional parsing only if a particular `cfg`
+attribute is defined (`has_cfg_prefix()`) and many more. The low-level parsers
+are found near the end of the `impl Config` block; be sure to look through them
+and their associated parsers immediately above to see how they are used to
+avoid writing additional parsing code unnecessarily.
+
+As a concrete example, here is the implementation for the
+`parse_failure_status()` parser, in
+[src/tools/compiletest/src/header.rs][header]:
+
+```diff
+@@ -232,6 +232,7 @@ pub struct TestProps {
+     // customized normalization rules
+     pub normalize_stdout: Vec<(String, String)>,
+     pub normalize_stderr: Vec<(String, String)>,
++    pub failure_status: i32,
+ }
+
+ impl TestProps {
+@@ -260,6 +261,7 @@ impl TestProps {
+             run_pass: false,
+             normalize_stdout: vec![],
+             normalize_stderr: vec![],
++            failure_status: 101,
+         }
+     }
+
+@@ -383,6 +385,10 @@ impl TestProps {
+             if let Some(rule) = config.parse_custom_normalization(ln, "normalize-stderr") {
+                 self.normalize_stderr.push(rule);
+             }
++
++            if let Some(code) = config.parse_failure_status(ln) {
++                self.failure_status = code;
++            }
+         });
+
+         for key in &["RUST_TEST_NOCAPTURE", "RUST_TEST_THREADS"] {
+@@ -488,6 +494,13 @@ impl Config {
+         self.parse_name_directive(line, "pretty-compare-only")
+     }
+
++    fn parse_failure_status(&self, line: &str) -> Option<i32> {
++        match self.parse_name_value_directive(line, "failure-status") {
++            Some(code) => code.trim().parse::<i32>().ok(),
++            _ => None,
++        }
++    }
+```
+ +Although specific to `failure-status` (as every header command will have a +different implementation in order to invoke behavior change) perhaps it is +helpful to see the behavior change implementation of one case, simply as an +example. To implement `failure-status`, the `check_correct_failure_status()` +function found in the `TestCx` implementation block, located in +[src/tools/compiletest/src/runtest.rs](https://github.com/rust-lang/rust/tree/master/src/tools/compiletest/src/runtest.rs), +was modified as per below: + +```diff +@@ -295,11 +295,14 @@ impl<'test> TestCx<'test> { + } + + fn check_correct_failure_status(&self, proc_res: &ProcRes) { +- // The value the rust runtime returns on failure +- const RUST_ERR: i32 = 101; +- if proc_res.status.code() != Some(RUST_ERR) { ++ let expected_status = Some(self.props.failure_status); ++ let received_status = proc_res.status.code(); ++ ++ if expected_status != received_status { + self.fatal_proc_rec( +- &format!("failure produced the wrong error: {}", proc_res.status), ++ &format!("Error: expected failure status ({:?}) but received status {:?}.", ++ expected_status, ++ received_status), + proc_res, + ); + } +@@ -320,7 +323,6 @@ impl<'test> TestCx<'test> { + ); + + let proc_res = self.exec_compiled_test(); +- + if !proc_res.status.success() { + self.fatal_proc_rec("test run failed!", &proc_res); + } +@@ -499,7 +501,6 @@ impl<'test> TestCx<'test> { + expected, + actual + ); +- panic!(); + } + } +``` +Note the use of `self.props.failure_status` to access the header command +property. In tests which do not specify the failure status header command, +`self.props.failure_status` will evaluate to the default value of 101 at the +time of this writing. But for a test which specifies a header command of, for +example, `// failure-status: 1`, `self.props.failure_status` will evaluate to +1, as `parse_failure_status()` will have overridden the `TestProps` default +value, for that test specifically. + +[test]: https://github.com/rust-lang/rust/tree/master/src/test +[header]: https://github.com/rust-lang/rust/tree/master/src/tools/compiletest/src/header.rs +[common]: https://github.com/rust-lang/rust/tree/master/src/tools/compiletest/src/common.rs diff --git a/src/doc/rustc-guide/src/const-eval.md b/src/doc/rustc-guide/src/const-eval.md new file mode 100644 index 0000000000..1f801fb22b --- /dev/null +++ b/src/doc/rustc-guide/src/const-eval.md @@ -0,0 +1,38 @@ +# Constant Evaluation + +Constant evaluation is the process of computing values at compile time. For a +specific item (constant/static/array length) this happens after the MIR for the +item is borrow-checked and optimized. In many cases trying to const evaluate an +item will trigger the computation of its MIR for the first time. + +Prominent examples are + +* The initializer of a `static` +* Array length + * needs to be known to reserve stack or heap space +* Enum variant discriminants + * needs to be known to prevent two variants from having the same + discriminant +* Patterns + * need to be known to check for overlapping patterns + +Additionally constant evaluation can be used to reduce the workload or binary +size at runtime by precomputing complex operations at compiletime and only +storing the result. + +Constant evaluation can be done by calling the `const_eval` query of `TyCtxt`. + +The `const_eval` query takes a [`ParamEnv`](./param_env.html) of environment in +which the constant is evaluated (e.g. the function within which the constant is +used) and a `GlobalId`. 
The `GlobalId` is made up of an +`Instance` referring to a constant or static or of an +`Instance` of a function and an index into the function's `Promoted` table. + +Constant evaluation returns a `Result` with either the error, or the simplest +representation of the constant. "simplest" meaning if it is representable as an +integer or fat pointer, it will directly yield the value (via `Value::ByVal` or +`Value::ByValPair`), instead of referring to the [`miri`](./miri.html) virtual +memory allocation (via `Value::ByRef`). This means that the `const_eval` +function cannot be used to create miri-pointers to the evaluated constant or +static. If you need that, you need to directly work with the functions in +[src/librustc_mir/const_eval.rs](https://doc.rust-lang.org/nightly/nightly-rustc/rustc_mir/const_eval/index.html). diff --git a/src/doc/rustc-guide/src/conventions.md b/src/doc/rustc-guide/src/conventions.md new file mode 100644 index 0000000000..d392ebd572 --- /dev/null +++ b/src/doc/rustc-guide/src/conventions.md @@ -0,0 +1,134 @@ +This file offers some tips on the coding conventions for rustc. This +chapter covers [formatting](#formatting), [coding for correctness](#cc), +[using crates from crates.io](#cio), and some tips on +[structuring your PR for easy review](#er). + + + +# Formatting and the tidy script + +rustc is slowly moving towards the [Rust standard coding style][fmt]; +at the moment, however, it follows a rather more *chaotic* style. We +do have some mandatory formatting conventions, which are automatically +enforced by a script we affectionately call the "tidy" script. The +tidy script runs automatically when you do `./x.py test` and can be run +in isolation with `./x.py test src/tools/tidy`. + +[fmt]: https://github.com/rust-lang-nursery/fmt-rfcs + + + +### Copyright notice + +Some existing files begin with a copyright and license notice. Please omit this +notice for new files licensed under the standard terms (dual MIT/Apache-2.0). +For existing files, the year at the top is not meaningful: copyright +protections are in fact automatic from the moment of authorship. We do not +typically edit the years on existing files. + +## Line length + +Lines should be at most 100 characters. It's even better if you can +keep things to 80. + +**Ignoring the line length limit.** Sometimes – in particular for +tests – it can be necessary to exempt yourself from this limit. In +that case, you can add a comment towards the top of the file (after +the copyright notice) like so: + +```rust +// ignore-tidy-linelength +``` + +## Tabs vs spaces + +Prefer 4-space indent. + + + +# Coding for correctness + +Beyond formatting, there are a few other tips that are worth +following. + +## Prefer exhaustive matches + +Using `_` in a match is convenient, but it means that when new +variants are added to the enum, they may not get handled correctly. +Ask yourself: if a new variant were added to this enum, what's the +chance that it would want to use the `_` code, versus having some +other treatment? Unless the answer is "low", then prefer an +exhaustive match. (The same advice applies to `if let` and `while +let`, which are effectively tests for a single variant.) 
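+
+As a small, made-up illustration (the `Phase` enum and names below are
+hypothetical, not taken from rustc), an exhaustive match turns the addition of
+a new variant into a compile error at every site that needs updating, whereas
+a `_` arm would silently fall through:
+
+```rust
+enum Phase {
+    Parsing,
+    TypeChecking,
+    Codegen,
+}
+
+fn describe(phase: Phase) -> &'static str {
+    // No `_` arm: if a new `Phase` variant is added, this match stops
+    // compiling, forcing a conscious decision about how to handle it.
+    match phase {
+        Phase::Parsing => "parsing the source",
+        Phase::TypeChecking => "type-checking the crate",
+        Phase::Codegen => "generating code",
+    }
+}
+
+fn main() {
+    println!("{}", describe(Phase::TypeChecking));
+}
+```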
+ +## Use "TODO" comments for things you don't want to forget + +As a useful tool to yourself, you can insert a `// TODO` comment +for something that you want to get back to before you land your PR: + +```rust,ignore +fn do_something() { + if something_else { + unimplemented!(); // TODO write this + } +} +``` + +The tidy script will report an error for a `// TODO` comment, so this +code would not be able to land until the TODO is fixed (or removed). + +This can also be useful in a PR as a way to signal from one commit that you are +leaving a bug that a later commit will fix: + +```rust,ignore +if foo { + return true; // TODO wrong, but will be fixed in a later commit +} +``` + + + +# Using crates from crates.io + +It is allowed to use crates from crates.io, though external +dependencies should not be added gratuitously. All such crates must +have a suitably permissive license. There is an automatic check which +inspects the Cargo metadata to ensure this. + + + +# How to structure your PR + +How you prepare the commits in your PR can make a big difference for the +reviewer. Here are some tips. + +**Isolate "pure refactorings" into their own commit.** For example, if +you rename a method, then put that rename into its own commit, along +with the renames of all the uses. + +**More commits is usually better.** If you are doing a large change, +it's almost always better to break it up into smaller steps that can +be independently understood. The one thing to be aware of is that if +you introduce some code following one strategy, then change it +dramatically (versus adding to it) in a later commit, that +'back-and-forth' can be confusing. + +**If you run rustfmt and the file was not already formatted, isolate +that into its own commit.** This is really the same as the previous +rule, but it's worth highlighting. It's ok to rustfmt files, but since +we do not currently run rustfmt all the time, that can introduce a lot +of noise into your commit. Please isolate that into its own +commit. This also makes rebases a lot less painful, since rustfmt +tends to cause a lot of merge conflicts, and having those isolated +into their own commit makes them easier to resolve. + +**No merges.** We do not allow merge commits into our history, other +than those by bors. If you get a merge conflict, rebase instead via a +command like `git rebase -i rust-lang/master` (presuming you use the +name `rust-lang` for your remote). + +**Individual commits do not have to build (but it's nice).** We do not +require that every intermediate commit successfully builds – we only +expect to be able to bisect at a PR level. However, if you *can* make +individual commits build, that is always helpful. + diff --git a/src/doc/rustc-guide/src/diag.md b/src/doc/rustc-guide/src/diag.md new file mode 100644 index 0000000000..936420ab6e --- /dev/null +++ b/src/doc/rustc-guide/src/diag.md @@ -0,0 +1,308 @@ +# Emitting Diagnostics + +A lot of effort has been put into making `rustc` have great error messages. +This chapter is about how to emit compile errors and lints from the compiler. + +## `Span` + +[`Span`][span] is the primary data structure in `rustc` used to represent a +location in the code being compiled. `Span`s are attached to most constructs in +HIR and MIR, allowing for more informative error reporting. 
+
+[span]: https://doc.rust-lang.org/nightly/nightly-rustc/syntax/source_map/struct.Span.html
+
+A `Span` can be looked up in a [`SourceMap`][sourcemap] to get a "snippet"
+useful for displaying errors with [`span_to_snippet`][sptosnip] and other
+similar methods on the `SourceMap`.
+
+[sourcemap]: https://doc.rust-lang.org/nightly/nightly-rustc/syntax/source_map/struct.SourceMap.html
+[sptosnip]: https://doc.rust-lang.org/nightly/nightly-rustc/syntax/source_map/struct.SourceMap.html#method.span_to_snippet
+
+## Error messages
+
+The [`rustc_errors`][errors] crate defines most of the utilities used for
+reporting errors.
+
+[errors]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_errors/index.html
+
+[`Session`][session] and [`ParseSess`][parsesses] have
+methods (or fields with methods) that allow reporting errors. These methods
+usually have names like `span_err` or `struct_span_err` or `span_warn`, etc...
+There are lots of them; they emit different types of "errors", such as
+warnings, errors, fatal errors, suggestions, etc.
+
+[parsesses]: https://doc.rust-lang.org/nightly/nightly-rustc/syntax/parse/struct.ParseSess.html
+[session]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc/session/struct.Session.html
+
+In general, there are two classes of such methods: ones that emit an error
+directly and ones that allow finer control over what to emit. For example,
+[`span_err`][spanerr] emits the given error message at the given `Span`, but
+[`struct_span_err`][strspanerr] instead returns a
+[`DiagnosticBuilder`][diagbuild].
+
+`DiagnosticBuilder` allows you to add related notes and suggestions to an error
+before emitting it by calling the [`emit`][emit] method. (Failing to either
+emit or [cancel][cancel] a `DiagnosticBuilder` will result in an ICE.) See the
+[docs][diagbuild] for more info on what you can do.
+
+[spanerr]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc/session/struct.Session.html#method.span_err
+[strspanerr]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc/session/struct.Session.html#method.struct_span_err
+[diagbuild]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_errors/diagnostic_builder/struct.DiagnosticBuilder.html
+[emit]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_errors/diagnostic_builder/struct.DiagnosticBuilder.html#method.emit
+[cancel]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_errors/struct.Diagnostic.html#method.cancel
+
+```rust,ignore
+// Get a DiagnosticBuilder. This does _not_ emit an error yet.
+let mut err = sess.struct_span_err(sp, "oh no! this is an error!");
+
+// In some cases, you might need to check if `sp` is generated by a macro to
+// avoid printing weird errors about macro-generated code.
+
+if let Ok(snippet) = sess.source_map().span_to_snippet(sp) {
+    // Use the snippet to generate a suggested fix
+    err.span_suggestion(suggestion_sp, "try using a qux here", format!("qux {}", snippet));
+} else {
+    // If we weren't able to generate a snippet, then emit a "help" message
+    // instead of a concrete "suggestion". In practice this is unlikely to be
+    // reached.
+    err.span_help(suggestion_sp, "you could use a qux here instead");
+}
+
+// emit the error
+err.emit();
+```
+
+## Suggestions
+
+In addition to telling the user exactly _why_ their code is wrong, it's
+oftentimes furthermore possible to tell them how to fix it.
To this end, +`DiagnosticBuilder` offers a structured suggestions API, which formats code +suggestions pleasingly in the terminal, or (when the `--error-format json` flag +is passed) as JSON for consumption by tools, most notably the [Rust Language +Server][rls] and [`rustfix`][rustfix]. + +[rls]: https://github.com/rust-lang-nursery/rls +[rustfix]: https://github.com/rust-lang-nursery/rustfix + +Not all suggestions should be applied mechanically. Use the +[`span_suggestion_with_applicability`][sswa] method of `DiagnosticBuilder` to +make a suggestion while providing a hint to tools whether the suggestion is +mechanically applicable or not. + +[sswa]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_errors/struct.DiagnosticBuilder.html#method.span_suggestion_with_applicability + +For example, to make our `qux` suggestion machine-applicable, we would do: + +```rust,ignore +let mut err = sess.struct_span_err(sp, "oh no! this is an error!"); + +if let Ok(snippet) = sess.source_map().span_to_snippet(sp) { + // Add applicability info! + err.span_suggestion_with_applicability( + suggestion_sp, + "try using a qux here", + format!("qux {}", snip), + Applicability::MachineApplicable, + ); +} else { + err.span_help(suggestion_sp, "you could use a qux here instead"); +} + +err.emit(); +``` + +This might emit an error like + +```console +$ rustc mycode.rs +error[E0999]: oh no! this is an error! + --> mycode.rs:3:5 + | +3 | sad() + | ^ help: try using a qux here: `qux sad()` + +error: aborting due to previous error + +For more information about this error, try `rustc --explain E0999`. +``` + +In some cases, like when the suggestion spans multiple lines or when there are +multiple suggestions, the suggestions are displayed on their own: + +```console +error[E0999]: oh no! this is an error! + --> mycode.rs:3:5 + | +3 | sad() + | ^ +help: try using a qux here: + | +3 | qux sad() + | ^^^ + +error: aborting due to previous error + +For more information about this error, try `rustc --explain E0999`. +``` + +There are a few other [`Applicability`][appl] possibilities: + +- `MachineApplicable`: Can be applied mechanically. +- `HasPlaceholders`: Cannot be applied mechanically because it has placeholder + text in the suggestions. For example, "Try adding a type: \`let x: + \\`". +- `MaybeIncorrect`: Cannot be applied mechanically because the suggestion may + or may not be a good one. +- `Unspecified`: Cannot be applied mechanically because we don't know which + of the above cases it falls into. + +[appl]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_errors/enum.Applicability.html + +## Lints + +The compiler linting infrastructure is defined in the [`rustc::lint`][rlint] +module. + +[rlint]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc/lint/index.html + +### Declaring a lint + +The built-in compiler lints are defined in the [`rustc_lint`][builtin] +crate. + +[builtin]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_lint/index.html + +Each lint is defined as a `struct` that implements the `LintPass` `trait`. The +trait implementation allows you to check certain syntactic constructs the +linter walks the source code. You can then choose to emit lints in a very +similar way to compile errors. Finally, you register the lint to actually get +it to be run by the compiler by using the `declare_lint!` macro. + +For example, the following lint checks for uses +of `while true { ... }` and suggests using `loop { ... }` instead. 
+ +```rust,ignore +// Declare a lint called `WHILE_TRUE` +declare_lint! { + WHILE_TRUE, + + // warn-by-default + Warn, + + // This string is the lint description + "suggest using `loop { }` instead of `while true { }`" +} + +// Define a struct and `impl LintPass` for it. +#[derive(Copy, Clone)] +pub struct WhileTrue; + +impl LintPass for WhileTrue { + fn get_lints(&self) -> LintArray { + lint_array!(WHILE_TRUE) + } +} + +// LateLintPass has lots of methods. We only override the definition of +// `check_expr` for this lint because that's all we need, but you could +// override other methods for your own lint. See the rustc docs for a full +// list of methods. +impl<'a, 'tcx> LateLintPass<'a, 'tcx> for WhileTrue { + fn check_expr(&mut self, cx: &LateContext, e: &hir::Expr) { + if let hir::ExprWhile(ref cond, ..) = e.node { + if let hir::ExprLit(ref lit) = cond.node { + if let ast::LitKind::Bool(true) = lit.node { + if lit.span.ctxt() == SyntaxContext::empty() { + let msg = "denote infinite loops with `loop { ... }`"; + let condition_span = cx.tcx.sess.source_map().def_span(e.span); + let mut err = cx.struct_span_lint(WHILE_TRUE, condition_span, msg); + err.span_suggestion_short(condition_span, "use `loop`", "loop".to_owned()); + err.emit(); + } + } + } + } + } +} +``` + +### Edition-gated Lints + +Sometimes we want to change the behavior of a lint in a new edition. To do this, +we just add the transition to our invocation of `declare_lint!`: + +```rust,ignore +declare_lint! { + pub ANONYMOUS_PARAMETERS, + Allow, + "detects anonymous parameters", + Edition::Edition2018 => Warn, +} +``` + +This makes the `ANONYMOUS_PARAMETERS` lint allow-by-default in the 2015 edition +but warn-by-default in the 2018 edition. + +Lints that represent an incompatibility (i.e. error) in the upcoming edition +should also be registered as `FutureIncompatibilityLint`s in +[`register_builtins`][rbuiltins] function in [`rustc_lint::lib`][builtin]. + +### Lint Groups + +Lints can be turned on in groups. These groups are declared in the +[`register_builtins`][rbuiltins] function in [`rustc_lint::lib`][builtin]. The +`add_lint_group!` macro is used to declare a new group. + +[rbuiltins]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_lint/fn.register_builtins.html + +For example, + +```rust,ignore + add_lint_group!(sess, + "nonstandard_style", + NON_CAMEL_CASE_TYPES, + NON_SNAKE_CASE, + NON_UPPER_CASE_GLOBALS); +``` + +This defines the `nonstandard_style` group which turns on the listed lints. A +user can turn on these lints with a `!#[warn(nonstandard_style)]` attribute in +the source code, or by passing `-W nonstandard-style` on the command line. + +### Linting early in the compiler + +On occasion, you may need to define a lint that runs before the linting system +has been initialized (e.g. during parsing or macro expansion). This is +problematic because we need to have computed lint levels to know whether we +should emit a warning or an error or nothing at all. + +To solve this problem, we buffer the lints until the linting system is +processed. [`Session`][sessbl] and [`ParseSess`][parsebl] both have +`buffer_lint` methods that allow you to buffer a lint for later. The linting +system automatically takes care of handling buffered lints later. 
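+
+For illustration, a buffered-lint call might look roughly like the following
+sketch; the lint name and the surrounding variables are assumptions, not code
+copied from the compiler:
+
+```rust,ignore
+// Hypothetical: report a lint found during parsing, before lint levels
+// have been computed. The lint is only buffered here; it is emitted later.
+sess.buffer_lint(
+    lint::builtin::SOME_EARLY_LINT,  // which lint to report (made-up name)
+    node_id,                         // the node the lint is attached to
+    span,                            // where to point the diagnostic
+    "this construct is discouraged", // the lint message
+);
+```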
+ +[sessbl]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc/session/struct.Session.html#method.buffer_lint +[parsebl]: https://doc.rust-lang.org/nightly/nightly-rustc/syntax/parse/struct.ParseSess.html#method.buffer_lint + +Thus, to define a lint that runs early in the compilation, one defines a lint +like normal but invokes the lint with `buffer_lint`. + +#### Linting even earlier in the compiler + +The parser (`libsyntax`) is interesting in that it cannot have dependencies on +any of the other `librustc*` crates. In particular, it cannot depend on +`librustc::lint` or `librustc_lint`, where all of the compiler linting +infrastructure is defined. That's troublesome! + +To solve this, `libsyntax` defines its own buffered lint type, which +`ParseSess::buffer_lint` uses. After macro expansion, these buffered lints are +then dumped into the `Session::buffered_lints` used by the rest of the compiler. + +Usage for buffered lints in `libsyntax` is pretty much the same as the rest of +the compiler with one exception because we cannot import the `LintId`s for +lints we want to emit. Instead, the [`BufferedEarlyLintId`] type is used. If you +are defining a new lint, you will want to add an entry to this enum. Then, add +an appropriate mapping to the body of [`Lint::from_parser_lint_id`][fplid]. + +[`BufferedEarlyLintId`]: https://doc.rust-lang.org/nightly/nightly-rustc/syntax/early_buffered_lints/enum.BufferedEarlyLintId.html +[fplid]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc/lint/struct.Lint.html#from_parser_lint_id diff --git a/src/doc/rustc-guide/src/existential-types.md b/src/doc/rustc-guide/src/existential-types.md new file mode 100644 index 0000000000..ef20167c18 --- /dev/null +++ b/src/doc/rustc-guide/src/existential-types.md @@ -0,0 +1,48 @@ +# Existential Types + +Existential types are essentially strong type aliases which only expose +a specific set of traits as their interface and the concrete type in the +background is inferred from a certain set of use sites of the existential +type. + +In the language they are expressed via + +```rust,ignore +existential type Foo: Bar; +``` + +This is in existential type named `Foo` which can be interacted with via +the `Bar` trait's interface. + +Since there needs to be a concrete background type, you can currently +express that type by using the existential type in a "defining use site". + +```rust,ignore +struct Struct; +impl Bar for Struct { /* stuff */ } +fn foo() -> Foo { + Struct +} +``` + +Any other "defining use site" needs to produce the exact same type. + +## Defining use site(s) + +Currently only the return value of a function inside can +be a defining use site of an existential type (and only if the return +type of that function contains the existential type). + +The defining use of an existential type can be any code *within* the parent +of the existential type definition. This includes any siblings of the +existential type and all children of the siblings. + +The initiative for *"not causing fatal brain damage to developers due to +accidentally running infinite loops in their brain while trying to +comprehend what the type system is doing"* has decided to disallow children +of existential types to be defining use sites. + +### Associated existential types + +Associated existential types can be defined by any other associated item +on the same trait `impl` or a child of these associated items. 
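+
+As a sketch of the defining-use rules above (assuming the `existential type`
+syntax and the trait `Bar` from the earlier example):
+
+```rust,ignore
+existential type Foo: Bar;
+
+struct Struct;
+impl Bar for Struct { /* stuff */ }
+
+// Both siblings are defining use sites, so both must infer the *same*
+// concrete type (`Struct`) for `Foo`; returning a different `Bar`
+// implementor from one of them would be an error.
+fn foo() -> Foo { Struct }
+fn also_foo() -> Foo { Struct }
+```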
diff --git a/src/doc/rustc-guide/src/high-level-overview.md b/src/doc/rustc-guide/src/high-level-overview.md new file mode 100644 index 0000000000..93b5592c3a --- /dev/null +++ b/src/doc/rustc-guide/src/high-level-overview.md @@ -0,0 +1,142 @@ +# High-level overview of the compiler source + +## Crate structure + +The main Rust repository consists of a `src` directory, under which +there live many crates. These crates contain the sources for the +standard library and the compiler. This document, of course, focuses +on the latter. + +Rustc consists of a number of crates, including `syntax`, +`rustc`, `rustc_back`, `rustc_codegen`, `rustc_driver`, and +many more. The source for each crate can be found in a directory +like `src/libXXX`, where `XXX` is the crate name. + +(N.B. The names and divisions of these crates are not set in +stone and may change over time. For the time being, we tend towards a +finer-grained division to help with compilation time, though as incremental +compilation improves, that may change.) + +The dependency structure of these crates is roughly a diamond: + +```text + rustc_driver + / | \ + / | \ + / | \ + / v \ +rustc_codegen rustc_borrowck ... rustc_metadata + \ | / + \ | / + \ | / + \ v / + rustc + | + v + syntax + / \ + / \ + syntax_pos syntax_ext +``` + +The `rustc_driver` crate, at the top of this lattice, is effectively +the "main" function for the rust compiler. It doesn't have much "real +code", but instead ties together all of the code defined in the other +crates and defines the overall flow of execution. (As we transition +more and more to the [query model], however, the +"flow" of compilation is becoming less centrally defined.) + +At the other extreme, the `rustc` crate defines the common and +pervasive data structures that all the rest of the compiler uses +(e.g. how to represent types, traits, and the program itself). It +also contains some amount of the compiler itself, although that is +relatively limited. + +Finally, all the crates in the bulge in the middle define the bulk of +the compiler – they all depend on `rustc`, so that they can make use +of the various types defined there, and they export public routines +that `rustc_driver` will invoke as needed (more and more, what these +crates export are "query definitions", but those are covered later +on). + +Below `rustc` lie various crates that make up the parser and error +reporting mechanism. For historical reasons, these crates do not have +the `rustc_` prefix, but they are really just as much an internal part +of the compiler and not intended to be stable (though they do wind up +getting used by some crates in the wild; a practice we hope to +gradually phase out). + +Each crate has a `README.md` file that describes, at a high-level, +what it contains, and tries to give some kind of explanation (some +better than others). + +## The main stages of compilation + +The Rust compiler is in a bit of transition right now. It used to be a +purely "pass-based" compiler, where we ran a number of passes over the +entire program, and each did a particular check of transformation. We +are gradually replacing this pass-based code with an alternative setup +based on on-demand **queries**. In the query-model, we work backwards, +executing a *query* that expresses our ultimate goal (e.g. "compile +this crate"). This query in turn may make other queries (e.g. "get me +a list of all modules in the crate"). 
Those queries make other queries +that ultimately bottom out in the base operations, like parsing the +input, running the type-checker, and so forth. This on-demand model +permits us to do exciting things like only do the minimal amount of +work needed to type-check a single function. It also helps with +incremental compilation. (For details on defining queries, check out +the [query model].) + +Regardless of the general setup, the basic operations that the +compiler must perform are the same. The only thing that changes is +whether these operations are invoked front-to-back, or on demand. In +order to compile a Rust crate, these are the general steps that we +take: + +1. **Parsing input** + - this processes the `.rs` files and produces the AST + ("abstract syntax tree") + - the AST is defined in `src/libsyntax/ast.rs`. It is intended to match the lexical + syntax of the Rust language quite closely. +2. **Name resolution, macro expansion, and configuration** + - once parsing is complete, we process the AST recursively, resolving + paths and expanding macros. This same process also processes `#[cfg]` + nodes, and hence may strip things out of the AST as well. +3. **Lowering to HIR** + - Once name resolution completes, we convert the AST into the HIR, + or "[high-level intermediate representation]". The HIR is defined in + `src/librustc/hir/`; that module also includes the [lowering] code. + - The HIR is a lightly desugared variant of the AST. It is more processed + than the AST and more suitable for the analyses that follow. + It is **not** required to match the syntax of the Rust language. + - As a simple example, in the **AST**, we preserve the parentheses + that the user wrote, so `((1 + 2) + 3)` and `1 + 2 + 3` parse + into distinct trees, even though they are equivalent. In the + HIR, however, parentheses nodes are removed, and those two + expressions are represented in the same way. +3. **Type-checking and subsequent analyses** + - An important step in processing the HIR is to perform type + checking. This process assigns types to every HIR expression, + for example, and also is responsible for resolving some + "type-dependent" paths, such as field accesses (`x.f` – we + can't know what field `f` is being accessed until we know the + type of `x`) and associated type references (`T::Item` – we + can't know what type `Item` is until we know what `T` is). + - Type checking creates "side-tables" (`TypeckTables`) that include + the types of expressions, the way to resolve methods, and so forth. + - After type-checking, we can do other analyses, such as privacy checking. +4. **Lowering to MIR and post-processing** + - Once type-checking is done, we can lower the HIR into MIR ("middle IR"), + which is a **very** desugared version of Rust, well suited to borrowck + but also to certain high-level optimizations. +5. **Translation to LLVM and LLVM optimizations** + - From MIR, we can produce LLVM IR. + - LLVM then runs its various optimizations, which produces a number of + `.o` files (one for each "codegen unit"). +6. **Linking** + - Finally, those `.o` files are linked together. 
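+
+To make the query model mentioned above slightly more concrete, demanding a
+query from inside the compiler looks roughly like this sketch (the wrapper
+function is invented; `type_of` and `optimized_mir` are real queries, but the
+details of their signatures are glossed over):
+
+```rust,ignore
+fn example<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, def_id: DefId) {
+    // Each call returns a cached result if one is available; otherwise the
+    // query is executed, which may in turn demand further queries.
+    let ty = tcx.type_of(def_id);
+    let mir = tcx.optimized_mir(def_id);
+}
+```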
+ + +[query model]: query.html +[high-level intermediate representation]: hir.html +[lowering]: lowering.html \ No newline at end of file diff --git a/src/doc/rustc-guide/src/hir.md b/src/doc/rustc-guide/src/hir.md new file mode 100644 index 0000000000..e6bca7f37d --- /dev/null +++ b/src/doc/rustc-guide/src/hir.md @@ -0,0 +1,159 @@ +# The HIR + +The HIR – "High-Level Intermediate Representation" – is the primary IR used +in most of rustc. It is a compiler-friendly representation of the abstract +syntax tree (AST) that is generated after parsing, macro expansion, and name +resolution (see [Lowering](./lowering.html) for how the HIR is created). +Many parts of HIR resemble Rust surface syntax quite closely, with +the exception that some of Rust's expression forms have been desugared away. +For example, `for` loops are converted into a `loop` and do not appear in +the HIR. This makes HIR more amenable to analysis than a normal AST. + +This chapter covers the main concepts of the HIR. + +You can view the HIR representation of your code by passing the +`-Zunpretty=hir-tree` flag to rustc: + +```bash +> cargo rustc -- -Zunpretty=hir-tree +``` + +### Out-of-band storage and the `Crate` type + +The top-level data-structure in the HIR is the [`Crate`], which stores +the contents of the crate currently being compiled (we only ever +construct HIR for the current crate). Whereas in the AST the crate +data structure basically just contains the root module, the HIR +`Crate` structure contains a number of maps and other things that +serve to organize the content of the crate for easier access. + +[`Crate`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc/hir/struct.Crate.html + +For example, the contents of individual items (e.g. modules, +functions, traits, impls, etc) in the HIR are not immediately +accessible in the parents. So, for example, if there is a module item +`foo` containing a function `bar()`: + +```rust +mod foo { + fn bar() { } +} +``` + +then in the HIR the representation of module `foo` (the [`Mod`] +struct) would only have the **`ItemId`** `I` of `bar()`. To get the +details of the function `bar()`, we would lookup `I` in the +`items` map. + +[`Mod`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc/hir/struct.Mod.html + +One nice result from this representation is that one can iterate +over all items in the crate by iterating over the key-value pairs +in these maps (without the need to trawl through the whole HIR). +There are similar maps for things like trait items and impl items, +as well as "bodies" (explained below). + +The other reason to set up the representation this way is for better +integration with incremental compilation. This way, if you gain access +to an [`&hir::Item`] (e.g. for the mod `foo`), you do not immediately +gain access to the contents of the function `bar()`. Instead, you only +gain access to the **id** for `bar()`, and you must invoke some +function to lookup the contents of `bar()` given its id; this gives +the compiler a chance to observe that you accessed the data for +`bar()`, and then record the dependency. + +[`&hir::Item`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc/hir/struct.Item.html + + + +### Identifiers in the HIR + +Most of the code that has to deal with things in HIR tends not to +carry around references into the HIR, but rather to carry around +*identifier numbers* (or just "ids"). Right now, you will find four +sorts of identifiers in active use: + +- [`DefId`], which primarily names "definitions" or top-level items. 
+ - You can think of a [`DefId`] as being shorthand for a very explicit + and complete path, like `std::collections::HashMap`. However, + these paths are able to name things that are not nameable in + normal Rust (e.g. impls), and they also include extra information + about the crate (such as its version number, as two versions of + the same crate can co-exist). + - A [`DefId`] really consists of two parts, a `CrateNum` (which + identifies the crate) and a `DefIndex` (which indexes into a list + of items that is maintained per crate). +- [`HirId`], which combines the index of a particular item with an + offset within that item. + - the key point of a [`HirId`] is that it is *relative* to some item + (which is named via a [`DefId`]). +- [`BodyId`], this is an absolute identifier that refers to a specific + body (definition of a function or constant) in the crate. It is currently + effectively a "newtype'd" [`NodeId`]. +- [`NodeId`], which is an absolute id that identifies a single node in the HIR + tree. + - While these are still in common use, **they are being slowly phased out**. + - Since they are absolute within the crate, adding a new node anywhere in the + tree causes the [`NodeId`]s of all subsequent code in the crate to change. + This is terrible for incremental compilation, as you can perhaps imagine. + +[`DefId`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc/hir/def_id/struct.DefId.html +[`HirId`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc/hir/struct.HirId.html +[`BodyId`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc/hir/struct.BodyId.html +[`NodeId`]: https://doc.rust-lang.org/nightly/nightly-rustc/syntax/ast/struct.NodeId.html + +### The HIR Map + +Most of the time when you are working with the HIR, you will do so via +the **HIR Map**, accessible in the tcx via [`tcx.hir`] (and defined in +the [`hir::map`] module). The [HIR map] contains a [number of methods] to +convert between IDs of various kinds and to lookup data associated +with an HIR node. + +[`tcx.hir`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc/ty/context/struct.GlobalCtxt.html#structfield.hir +[`hir::map`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc/hir/map/index.html +[HIR map]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc/hir/map/struct.Map.html +[number of methods]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc/hir/map/struct.Map.html#methods + +For example, if you have a [`DefId`], and you would like to convert it +to a [`NodeId`], you can use +[`tcx.hir.as_local_node_id(def_id)`][as_local_node_id]. This returns +an `Option` – this will be `None` if the def-id refers to +something outside of the current crate (since then it has no HIR +node), but otherwise returns `Some(n)` where `n` is the node-id of the +definition. + +[as_local_node_id]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc/hir/map/struct.Map.html#method.as_local_node_id + +Similarly, you can use [`tcx.hir.find(n)`][find] to lookup the node for a +[`NodeId`]. This returns a `Option>`, where [`Node`] is an enum +defined in the map; by matching on this you can find out what sort of +node the node-id referred to and also get a pointer to the data +itself. Often, you know what sort of node `n` is – e.g. if you know +that `n` must be some HIR expression, you can do +[`tcx.hir.expect_expr(n)`][expect_expr], which will extract and return the +[`&hir::Expr`][Expr], panicking if `n` is not in fact an expression. 
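+
+Putting these methods together, a typical lookup might look like the
+following sketch (method names as described above; the surrounding code is
+invented for illustration):
+
+```rust,ignore
+// Map a DefId back to its HIR node, if it belongs to the local crate.
+if let Some(node_id) = tcx.hir.as_local_node_id(def_id) {
+    if let Some(node) = tcx.hir.find(node_id) {
+        // Match on `node` to see what kind of node this is.
+    }
+    // If we already knew `node_id` referred to an expression, we could call
+    // `tcx.hir.expect_expr(node_id)` instead, which panics otherwise.
+}
+```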
+ +[find]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc/hir/map/struct.Map.html#method.find +[`Node`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc/hir/enum.Node.html +[expect_expr]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc/hir/map/struct.Map.html#method.expect_expr +[Expr]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc/hir/struct.Expr.html + +Finally, you can use the HIR map to find the parents of nodes, via +calls like [`tcx.hir.get_parent_node(n)`][get_parent_node]. + +[get_parent_node]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc/hir/map/struct.Map.html#method.get_parent_node + +### HIR Bodies + +A [`hir::Body`] represents some kind of executable code, such as the body +of a function/closure or the definition of a constant. Bodies are +associated with an **owner**, which is typically some kind of item +(e.g. an `fn()` or `const`), but could also be a closure expression +(e.g. `|x, y| x + y`). You can use the HIR map to find the body +associated with a given def-id ([`maybe_body_owned_by`]) or to find +the owner of a body ([`body_owner_def_id`]). + +[`hir::Body`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc/hir/struct.Body.html +[`maybe_body_owned_by`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc/hir/map/struct.Map.html#method.maybe_body_owned_by +[`body_owner_def_id`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc/hir/map/struct.Map.html#method.body_owner_def_id diff --git a/src/doc/rustc-guide/src/how-to-build-and-run.md b/src/doc/rustc-guide/src/how-to-build-and-run.md new file mode 100644 index 0000000000..89ad471991 --- /dev/null +++ b/src/doc/rustc-guide/src/how-to-build-and-run.md @@ -0,0 +1,334 @@ +# How to build the compiler and run what you built + +The compiler is built using a tool called `x.py`. You will need to +have Python installed to run it. But before we get to that, if you're going to +be hacking on `rustc`, you'll want to tweak the configuration of the compiler. +The default configuration is oriented towards running the compiler as a user, +not a developer. + +### Create a config.toml + +To start, copy [`config.toml.example`] to `config.toml`: + +[`config.toml.example`]: https://github.com/rust-lang/rust/blob/master/config.toml.example + +```bash +> cd $RUST_CHECKOUT +> cp config.toml.example config.toml +``` + +Then you will want to open up the file and change the following +settings (and possibly others, such as `llvm.ccache`): + +```toml +[llvm] +# Enables LLVM assertions, which will check that the LLVM bitcode generated +# by the compiler is internally consistent. These are particularly helpful +# if you edit `codegen`. +assertions = true + +[rust] +# This enables some assertions, but more importantly it enables the `debug!` +# logging macros that are essential for debugging rustc. +debug-assertions = true + +# This will make your build more parallel; it costs a bit of runtime +# performance perhaps (less inlining) but it's worth it. +codegen-units = 0 + +# I always enable full debuginfo, though debuginfo-lines is more important. +debuginfo = true + +# Gives you line numbers for backtraces. +debuginfo-lines = true +``` + +### What is x.py? + +x.py is the script used to orchestrate the tooling in the rustc repository. +It is the script that can build docs, run tests, and compile rustc. +It is the now preferred way to build rustc and it replaces the old makefiles +from before. Below are the different ways to utilize x.py in order to +effectively deal with the repo for various common tasks. 
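+
+For example, the most common invocations look like this; each is covered in
+more detail below and in later chapters:
+
+```bash
+> ./x.py build    # build the compiler and standard library
+> ./x.py test     # run the test suites
+> ./x.py doc      # build the documentation
+```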
+ +### Running x.py and building a stage1 compiler + +One thing to keep in mind is that `rustc` is a _bootstrapping_ +compiler. That is, since `rustc` is written in Rust, we need to use an +older version of the compiler to compile the newer version. In +particular, the newer version of the compiler, `libstd`, and other +tooling may use some unstable features internally. The result is that +compiling `rustc` is done in stages: + +- **Stage 0:** the stage0 compiler is usually the current _beta_ compiler + (`x.py` will download it for you); you can configure `x.py` to use something + else, though. +- **Stage 1:** the code in your clone (for new version) is then + compiled with the stage0 compiler to produce the stage1 compiler. + However, it was built with an older compiler (stage0), so to + optimize the stage1 compiler we go to next stage. + - (In theory, the stage1 compiler is functionally identical to the + stage2 compiler, but in practice there are subtle differences. In + particular, the stage1 compiler itself was built by stage0 and + hence not by the source in your working directory: this means that + the symbol names used in the compiler source may not match the + symbol names that would have been made by the stage1 compiler. + This can be important when using dynamic linking (e.g., with + derives. Sometimes this means that some tests don't work when run + with stage1.) +- **Stage 2:** we rebuild our stage1 compiler with itself to produce + the stage2 compiler (i.e. it builds itself) to have all the _latest + optimizations_. (By default, we copy the stage1 libraries for use by + the stage2 compiler, since they ought to be identical.) +- _(Optional)_ **Stage 3**: to sanity check of our new compiler, we + can build the libraries with the stage2 compiler. The result ought + to be identical to before, unless something has broken. + +#### Build Flags + +There are other flags you can pass to the build portion of x.py that can be +beneficial to cutting down compile times or fitting other things you might +need to change. They are: + +```bash +Options: + -v, --verbose use verbose output (-vv for very verbose) + -i, --incremental use incremental compilation + --config FILE TOML configuration file for build + --build BUILD build target of the stage0 compiler + --host HOST host targets to build + --target TARGET target targets to build + --on-fail CMD command to run on failure + --stage N stage to build + --keep-stage N stage to keep without recompiling + --src DIR path to the root of the rust checkout + -j, --jobs JOBS number of jobs to run in parallel + -h, --help print this help message +``` + +For hacking, often building the stage 1 compiler is enough, but for +final testing and release, the stage 2 compiler is used. + +`./x.py check` is really fast to build the rust compiler. +It is, in particular, very useful when you're doing some kind of +"type-based refactoring", like renaming a method, or changing the +signature of some function. + + + +Once you've created a config.toml, you are now ready to run +`x.py`. There are a lot of options here, but let's start with what is +probably the best "go to" command for building a local rust: + +```bash +> ./x.py build -i --stage 1 src/libstd +``` + +This may *look* like it only builds libstd, but that is not the case. 
+What this command does is the following: + +- Build libstd using the stage0 compiler (using incremental) +- Build librustc using the stage0 compiler (using incremental) + - This produces the stage1 compiler +- Build libstd using the stage1 compiler (cannot use incremental) + +This final product (stage1 compiler + libs built using that compiler) +is what you need to build other rust programs. + +Note that the command includes the `-i` switch. This enables incremental +compilation. This will be used to speed up the first two steps of the process: +in particular, if you make a small change, we ought to be able to use your old +results to make producing the stage1 **compiler** faster. + +Unfortunately, incremental cannot be used to speed up making the +stage1 libraries. This is because incremental only works when you run +the *same compiler* twice in a row. In this case, we are building a +*new stage1 compiler* every time. Therefore, the old incremental +results may not apply. **As a result, you will probably find that +building the stage1 libstd is a bottleneck for you** -- but fear not, +there is a (hacky) workaround. See [the section on "recommended +workflows"](#workflow) below. + +Note that this whole command just gives you a subset of the full rustc +build. The **full** rustc build (what you get if you just say `./x.py +build`) has quite a few more steps: + +- Build librustc and rustc with the stage1 compiler. + - The resulting compiler here is called the "stage2" compiler. +- Build libstd with stage2 compiler. +- Build librustdoc and a bunch of other things with the stage2 compiler. + + + +### Build specific components + + Build only the libcore library + +```bash +> ./x.py build src/libcore +``` + + Build the libcore and libproc_macro library only + +```bash +> ./x.py build src/libcore src/libproc_macro +``` + + Build only libcore up to Stage 1 + +```bash +> ./x.py build src/libcore --stage 1 +``` + +Sometimes you might just want to test if the part you’re working on can +compile. Using these commands you can test that it compiles before doing +a bigger build to make sure it works with the compiler. As shown before +you can also pass flags at the end such as --stage. + + +### Creating a rustup toolchain + +Once you have successfully built rustc, you will have created a bunch +of files in your `build` directory. In order to actually run the +resulting rustc, we recommend creating rustup toolchains. The first +one will run the stage1 compiler (which we built above). The second +will execute the stage2 compiler (which we did not build, but which +you will likely need to build at some point; for example, if you want +to run the entire test suite). + +```bash +> rustup toolchain link stage1 build//stage1 +> rustup toolchain link stage2 build//stage2 +``` + +The `` would typically be one of the following: + +- Linux: `x86_64-unknown-linux-gnu` +- Mac: `x86_64-apple-darwin` +- Windows: `x86_64-pc-windows-msvc` + +Now you can run the rustc you built with. If you run with `-vV`, you +should see a version number ending in `-dev`, indicating a build from +your local environment: + +```bash +> rustc +stage1 -vV +rustc 1.25.0-dev +binary: rustc +commit-hash: unknown +commit-date: unknown +host: x86_64-unknown-linux-gnu +release: 1.25.0-dev +LLVM version: 4.0 +``` + + + +### Suggested workflows for faster builds of the compiler + +There are two workflows that are useful for faster builds of the +compiler. 
+ +**Check, check, and check again.** The first workflow, which is useful +when doing simple refactorings, is to run `./x.py check` +continuously. Here you are just checking that the compiler can +**build**, but often that is all you need (e.g., when renaming a +method). You can then run `./x.py build` when you actually need to +run tests. + +In fact, it is sometimes useful to put off tests even when you are not +100% sure the code will work. You can then keep building up +refactoring commits and only run the tests at some later time. You can +then use `git bisect` to track down **precisely** which commit caused +the problem. A nice side-effect of this style is that you are left +with a fairly fine-grained set of commits at the end, all of which +build and pass tests. This often helps reviewing. + +**Incremental builds with `--keep-stage`.** Sometimes just checking +whether the compiler builds is not enough. A common example is that +you need to add a `debug!` statement to inspect the value of some +state or better understand the problem. In that case, you really need +a full build. By leveraging incremental, though, you can often get +these builds to complete very fast (e.g., around 30 seconds): the only +catch is this requires a bit of fudging and may produce compilers that +don't work (but that is easily detected and fixed). + +The sequence of commands you want is as follows: + +- Initial build: `./x.py build -i --stage 1 src/libstd` + - As [documented above](#command), this will build a functional + stage1 compiler +- Subsequent builds: `./x.py build -i --stage 1 src/libstd --keep-stage 1` + - Note that we added the `--keep-stage 1` flag here + +The effect of `--keep-stage 1` is that we just *assume* that the old +standard library can be re-used. If you are editing the compiler, this +is almost always true: you haven't changed the standard library, after +all. But sometimes, it's not true: for example, if you are editing +the "metadata" part of the compiler, which controls how the compiler +encodes types and other states into the `rlib` files, or if you are +editing things that wind up in the metadata (such as the definition of +the MIR). + +**The TL;DR is that you might get weird behavior from a compile when +using `--keep-stage 1`** -- for example, strange +[ICEs](appendix/glossary.html) or other panics. In that case, you +should simply remove the `--keep-stage 1` from the command and +rebuild. That ought to fix the problem. + +You can also use `--keep-stage 1` when running tests. Something like +this: + +- Initial test run: `./x.py test -i --stage 1 src/test/ui` +- Subsequent test run: `./x.py test -i --stage 1 src/test/ui --keep-stage 1` + +### Other x.py commands + +Here are a few other useful x.py commands. We'll cover some of them in detail +in other sections: + +- Building things: + - `./x.py clean` – clean up the build directory (`rm -rf build` works too, + but then you have to rebuild LLVM) + - `./x.py build --stage 1` – builds everything using the stage 1 compiler, + not just up to libstd + - `./x.py build` – builds the stage2 compiler +- Running tests (see the [section on running tests](./tests/running.html) for + more details): + - `./x.py test --stage 1 src/libstd` – runs the `#[test]` tests from libstd + - `./x.py test --stage 1 src/test/run-pass` – runs the `run-pass` test suite + +### ctags + +One of the challenges with rustc is that the RLS can't handle it, making code +navigation difficult. One solution is to use `ctags`. 
The following script can +be used to set it up: [https://github.com/nikomatsakis/rust-etags][etags]. + +CTAGS integrates into emacs and vim quite easily. The following can then be +used to build and generate tags: + +```console +$ rust-ctags src/lib* && ./x.py build +``` + +This allows you to do "jump-to-def" with whatever functions were around when +you last built, which is ridiculously useful. + +[etags]: https://github.com/nikomatsakis/rust-etags + +### Cleaning out build directories + +Sometimes you need to start fresh, but this is normally not the case. +If you need to run this then rustbuild is most likely not acting right and +you should file a bug as to what is going wrong. If you do need to clean +everything up then you only need to run one command! + + ```bash + > ./x.py clean + ``` + +### Compiler Documentation + +The documentation for the rust components are found at [rustc doc]. + +[rustc doc]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc/ diff --git a/src/doc/rustc-guide/src/incrcomp-debugging.md b/src/doc/rustc-guide/src/incrcomp-debugging.md new file mode 100644 index 0000000000..ecb29d544e --- /dev/null +++ b/src/doc/rustc-guide/src/incrcomp-debugging.md @@ -0,0 +1,115 @@ +# Debugging and Testing Dependencies + +## Testing the dependency graph + +There are various ways to write tests against the dependency graph. +The simplest mechanisms are the `#[rustc_if_this_changed]` and +`#[rustc_then_this_would_need]` annotations. These are used in compile-fail +tests to test whether the expected set of paths exist in the dependency graph. +As an example, see `src/test/compile-fail/dep-graph-caller-callee.rs`. + +The idea is that you can annotate a test like: + +```rust,ignore +#[rustc_if_this_changed] +fn foo() { } + +#[rustc_then_this_would_need(TypeckTables)] //~ ERROR OK +fn bar() { foo(); } + +#[rustc_then_this_would_need(TypeckTables)] //~ ERROR no path +fn baz() { } +``` + +This will check whether there is a path in the dependency graph from `Hir(foo)` +to `TypeckTables(bar)`. An error is reported for each +`#[rustc_then_this_would_need]` annotation that indicates whether a path +exists. `//~ ERROR` annotations can then be used to test if a path is found (as +demonstrated above). + +## Debugging the dependency graph + +### Dumping the graph + +The compiler is also capable of dumping the dependency graph for your +debugging pleasure. To do so, pass the `-Z dump-dep-graph` flag. The +graph will be dumped to `dep_graph.{txt,dot}` in the current +directory. You can override the filename with the `RUST_DEP_GRAPH` +environment variable. + +Frequently, though, the full dep graph is quite overwhelming and not +particularly helpful. Therefore, the compiler also allows you to filter +the graph. You can filter in three ways: + +1. All edges originating in a particular set of nodes (usually a single node). +2. All edges reaching a particular set of nodes. +3. All edges that lie between given start and end nodes. + +To filter, use the `RUST_DEP_GRAPH_FILTER` environment variable, which should +look like one of the following: + +```text +source_filter // nodes originating from source_filter +-> target_filter // nodes that can reach target_filter +source_filter -> target_filter // nodes in between source_filter and target_filter +``` + +`source_filter` and `target_filter` are a `&`-separated list of strings. +A node is considered to match a filter if all of those strings appear in its +label. 
So, for example: + +```text +RUST_DEP_GRAPH_FILTER='-> TypeckTables' +``` + +would select the predecessors of all `TypeckTables` nodes. Usually though you +want the `TypeckTables` node for some particular fn, so you might write: + +```text +RUST_DEP_GRAPH_FILTER='-> TypeckTables & bar' +``` + +This will select only the predecessors of `TypeckTables` nodes for functions +with `bar` in their name. + +Perhaps you are finding that when you change `foo` you need to re-type-check +`bar`, but you don't think you should have to. In that case, you might do: + +```text +RUST_DEP_GRAPH_FILTER='Hir & foo -> TypeckTables & bar' +``` + +This will dump out all the nodes that lead from `Hir(foo)` to +`TypeckTables(bar)`, from which you can (hopefully) see the source +of the erroneous edge. + +### Tracking down incorrect edges + +Sometimes, after you dump the dependency graph, you will find some +path that should not exist, but you will not be quite sure how it came +to be. **When the compiler is built with debug assertions,** it can +help you track that down. Simply set the `RUST_FORBID_DEP_GRAPH_EDGE` +environment variable to a filter. Every edge created in the dep-graph +will be tested against that filter – if it matches, a `bug!` is +reported, so you can easily see the backtrace (`RUST_BACKTRACE=1`). + +The syntax for these filters is the same as described in the previous +section. However, note that this filter is applied to every **edge** +and doesn't handle longer paths in the graph, unlike the previous +section. + +Example: + +You find that there is a path from the `Hir` of `foo` to the type +check of `bar` and you don't think there should be. You dump the +dep-graph as described in the previous section and open `dep-graph.txt` +to see something like: + +```text +Hir(foo) -> Collect(bar) +Collect(bar) -> TypeckTables(bar) +``` + +That first edge looks suspicious to you. So you set +`RUST_FORBID_DEP_GRAPH_EDGE` to `Hir&foo -> Collect&bar`, re-run, and +then observe the backtrace. Voila, bug fixed! diff --git a/src/doc/rustc-guide/src/incremental-compilation.md b/src/doc/rustc-guide/src/incremental-compilation.md new file mode 100644 index 0000000000..0a25e16648 --- /dev/null +++ b/src/doc/rustc-guide/src/incremental-compilation.md @@ -0,0 +1,141 @@ +# Incremental compilation + +The incremental compilation scheme is, in essence, a surprisingly +simple extension to the overall query system. We'll start by describing +a slightly simplified variant of the real thing – the "basic algorithm" – +and then describe some possible improvements. + +## The basic algorithm + +The basic algorithm is +called the **red-green** algorithm[^salsa]. The high-level idea is +that, after each run of the compiler, we will save the results of all +the queries that we do, as well as the **query DAG**. The +**query DAG** is a [DAG] that indexes which queries executed which +other queries. So, for example, there would be an edge from a query Q1 +to another query Q2 if computing Q1 required computing Q2 (note that +because queries cannot depend on themselves, this results in a DAG and +not a general graph). + +[DAG]: https://en.wikipedia.org/wiki/Directed_acyclic_graph + +On the next run of the compiler, then, we can sometimes reuse these +query results to avoid re-executing a query. We do this by assigning +every query a **color**: + +- If a query is colored **red**, that means that its result during + this compilation has **changed** from the previous compilation. 
+- If a query is colored **green**, that means that its result is + the **same** as the previous compilation. + +There are two key insights here: + +- First, if all the inputs to query Q are colored green, then the + query Q **must** result in the same value as last time and hence + need not be re-executed (or else the compiler is not deterministic). +- Second, even if some inputs to a query changes, it may be that it + **still** produces the same result as the previous compilation. In + particular, the query may only use part of its input. + - Therefore, after executing a query, we always check whether it + produced the same result as the previous time. **If it did,** we + can still mark the query as green, and hence avoid re-executing + dependent queries. + +### The try-mark-green algorithm + +At the core of incremental compilation is an algorithm called +"try-mark-green". It has the job of determining the color of a given +query Q (which must not have yet been executed). In cases where Q has +red inputs, determining Q's color may involve re-executing Q so that +we can compare its output, but if all of Q's inputs are green, then we +can conclude that Q must be green without re-executing it or inspecting +its value at all. In the compiler, this allows us to avoid +deserializing the result from disk when we don't need it, and in fact +enables us to sometimes skip *serializing* the result as well +(see the refinements section below). + +Try-mark-green works as follows: + +- First check if the query Q was executed during the previous compilation. + - If not, we can just re-execute the query as normal, and assign it the + color of red. +- If yes, then load the 'dependent queries' of Q. +- If there is a saved result, then we load the `reads(Q)` vector from the + query DAG. The "reads" is the set of queries that Q executed during + its execution. + - For each query R in `reads(Q)`, we recursively demand the color + of R using try-mark-green. + - Note: it is important that we visit each node in `reads(Q)` in same order + as they occurred in the original compilation. See [the section on the + query DAG below](#dag). + - If **any** of the nodes in `reads(Q)` wind up colored **red**, then Q is + dirty. + - We re-execute Q and compare the hash of its result to the hash of the + result from the previous compilation. + - If the hash has not changed, we can mark Q as **green** and return. + - Otherwise, **all** of the nodes in `reads(Q)` must be **green**. In that + case, we can color Q as **green** and return. + + + +### The query DAG + +The query DAG code is stored in +[`src/librustc/dep_graph`][dep_graph]. Construction of the DAG is done +by instrumenting the query execution. + +One key point is that the query DAG also tracks ordering; that is, for +each query Q, we not only track the queries that Q reads, we track the +**order** in which they were read. This allows try-mark-green to walk +those queries back in the same order. This is important because once a +subquery comes back as red, we can no longer be sure that Q will continue +along the same path as before. That is, imagine a query like this: + +```rust,ignore +fn main_query(tcx) { + if tcx.subquery1() { + tcx.subquery2() + } else { + tcx.subquery3() + } +} +``` + +Now imagine that in the first compilation, `main_query` starts by +executing `subquery1`, and this returns true. In that case, the next +query `main_query` executes will be `subquery2`, and `subquery3` will +not be executed at all. 
+ +But now imagine that in the **next** compilation, the input has +changed such that `subquery1` returns **false**. In this case, `subquery2` +would never execute. If try-mark-green were to visit `reads(main_query)` out +of order, however, it might visit `subquery2` before `subquery1`, and hence +execute it. +This can lead to ICEs and other problems in the compiler. + +[dep_graph]: https://github.com/rust-lang/rust/tree/master/src/librustc/dep_graph + +## Improvements to the basic algorithm + +In the description of the basic algorithm, we said that at the end of +compilation we would save the results of all the queries that were +performed. In practice, this can be quite wasteful – many of those +results are very cheap to recompute, and serializing and deserializing +them is not a particular win. In practice, what we would do is to save +**the hashes** of all the subqueries that we performed. Then, in select cases, +we **also** save the results. + +This is why the incremental algorithm separates computing the +**color** of a node, which often does not require its value, from +computing the **result** of a node. Computing the result is done via a simple +algorithm like so: + +- Check if a saved result for Q is available. If so, compute the color of Q. + If Q is green, deserialize and return the saved result. +- Otherwise, execute Q. + - We can then compare the hash of the result and color Q as green if + it did not change. + +# Footnotes + +[^salsa]: I have long wanted to rename it to the Salsa algorithm, but it never caught on. -@nikomatsakis diff --git a/src/doc/rustc-guide/src/lowering.md b/src/doc/rustc-guide/src/lowering.md new file mode 100644 index 0000000000..c3a1a96ccf --- /dev/null +++ b/src/doc/rustc-guide/src/lowering.md @@ -0,0 +1,48 @@ +# Lowering + +The lowering step converts AST to [HIR](hir.html). +This means many structures are removed if they are irrelevant +for type analysis or similar syntax agnostic analyses. Examples +of such structures include but are not limited to + +* Parenthesis + * Removed without replacement, the tree structure makes order explicit +* `for` loops and `while (let)` loops + * Converted to `loop` + `match` and some `let` bindings +* `if let` + * Converted to `match` +* Universal `impl Trait` + * Converted to generic arguments + (but with some flags, to know that the user didn't write them) +* Existential `impl Trait` + * Converted to a virtual `existential type` declaration + +Lowering needs to uphold several invariants in order to not trigger the +sanity checks in `src/librustc/hir/map/hir_id_validator.rs`: + +1. A `HirId` must be used if created. So if you use the `lower_node_id`, + you *must* use the resulting `NodeId` or `HirId` (either is fine, since + any `NodeId`s in the `HIR` are checked for existing `HirId`s) +2. Lowering a `HirId` must be done in the scope of the *owning* item. + This means you need to use `with_hir_id_owner` if you are creating parts + of an item other than the one being currently lowered. This happens for + example during the lowering of existential `impl Trait` +3. A `NodeId` that will be placed into a HIR structure must be lowered, + even if its `HirId` is unused. Calling + `let _ = self.lower_node_id(node_id);` is perfectly legitimate. +4. If you are creating new nodes that didn't exist in the `AST`, you *must* + create new ids for them. This is done by calling the `next_id` method, + which produces both a new `NodeId` as well as automatically lowering it + for you so you also get the `HirId`. 
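+
+A small sketch of invariants 3 and 4 (the method names come from the list
+above; the surrounding code and exact return types are illustrative only):
+
+```rust,ignore
+// Invariant 3: a NodeId that ends up inside a HIR structure must be
+// lowered, even if the resulting HirId is never used directly.
+let _ = self.lower_node_id(node_id);
+
+// Invariant 4: a node that did not exist in the AST needs a fresh id;
+// `next_id` creates one and lowers it, so the HirId comes with it.
+let new_id = self.next_id();
+```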
+ +If you are creating new `DefId`s, since each `DefId` needs to have a +corresponding `NodeId`, it is advisable to add these `NodeId`s to the +`AST` so you don't have to generate new ones during lowering. This has +the advantage of creating a way to find the `DefId` of something via its +`NodeId`. If lowering needs this `DefId` in multiple places, you can't +generate a new `NodeId` in all those places because you'd also get a new +`DefId` then. With a `NodeId` from the `AST` this is not an issue. + +Having the `NodeId` also allows the `DefCollector` to generate the `DefId`s +instead of lowering having to do it on the fly. Centralizing the `DefId` +generation in one place makes it easier to refactor and reason about. diff --git a/src/doc/rustc-guide/src/macro-expansion.md b/src/doc/rustc-guide/src/macro-expansion.md new file mode 100644 index 0000000000..c62301247a --- /dev/null +++ b/src/doc/rustc-guide/src/macro-expansion.md @@ -0,0 +1,212 @@ +# Macro expansion + +Macro expansion happens during parsing. `rustc` has two parsers, in fact: the +normal Rust parser, and the macro parser. During the parsing phase, the normal +Rust parser will set aside the contents of macros and their invocations. Later, +before name resolution, macros are expanded using these portions of the code. +The macro parser, in turn, may call the normal Rust parser when it needs to +bind a metavariable (e.g. `$my_expr`) while parsing the contents of a macro +invocation. The code for macro expansion is in +[`src/libsyntax/ext/tt/`][code_dir]. This chapter aims to explain how macro +expansion works. + +### Example + +It's helpful to have an example to refer to. For the remainder of this chapter, +whenever we refer to the "example _definition_", we mean the following: + +```rust,ignore +macro_rules! printer { + (print $mvar:ident) => { + println!("{}", $mvar); + } + (print twice $mvar:ident) => { + println!("{}", $mvar); + println!("{}", $mvar); + } +} +``` + +`$mvar` is called a _metavariable_. Unlike normal variables, rather than +binding to a value in a computation, a metavariable binds _at compile time_ to +a tree of _tokens_. A _token_ is a single "unit" of the grammar, such as an +identifier (e.g. `foo`) or punctuation (e.g. `=>`). There are also other +special tokens, such as `EOF`, which indicates that there are no more tokens. +Token trees resulting from paired parentheses-like characters (`(`...`)`, +`[`...`]`, and `{`...`}`) – they include the open and close and all the tokens +in between (we do require that parentheses-like characters be balanced). Having +macro expansion operate on token streams rather than the raw bytes of a source +file abstracts away a lot of complexity. The macro expander (and much of the +rest of the compiler) doesn't really care that much about the exact line and +column of some syntactic construct in the code; it cares about what constructs +are used in the code. Using tokens allows us to care about _what_ without +worrying about _where_. For more information about tokens, see the +[Parsing][parsing] chapter of this book. + +Whenever we refer to the "example _invocation_", we mean the following snippet: + +```rust,ignore +printer!(print foo); // Assume `foo` is a variable defined somewhere else... +``` + +The process of expanding the macro invocation into the syntax tree +`println!("{}", foo)` and then expanding that into a call to `Display::fmt` is +called _macro expansion_, and it is the topic of this chapter. 
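+
+Written out as a complete program (note the `;` separating the two rules,
+which `macro_rules!` requires), the example definition and invocation look
+like this:
+
+```rust
+macro_rules! printer {
+    (print $mvar:ident) => {
+        println!("{}", $mvar);
+    };
+    (print twice $mvar:ident) => {
+        println!("{}", $mvar);
+        println!("{}", $mvar);
+    };
+}
+
+fn main() {
+    let foo = "hello";
+    printer!(print foo);       // expands to `println!("{}", foo);`
+    printer!(print twice foo); // expands to two `println!` calls
+}
+```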
+ +### The macro parser + +There are two parts to macro expansion: parsing the definition and parsing the +invocations. Interestingly, both are done by the macro parser. + +Basically, the macro parser is like an NFA-based regex parser. It uses an +algorithm similar in spirit to the [Earley parsing +algorithm](https://en.wikipedia.org/wiki/Earley_parser). The macro parser is +defined in [`src/libsyntax/ext/tt/macro_parser.rs`][code_mp]. + +The interface of the macro parser is as follows (this is slightly simplified): + +```rust,ignore +fn parse( + sess: ParserSession, + tts: TokenStream, + ms: &[TokenTree] +) -> NamedParseResult +``` + +In this interface: + +- `sess` is a "parsing session", which keeps track of some metadata. Most + notably, this is used to keep track of errors that are generated so they can + be reported to the user. +- `tts` is a stream of tokens. The macro parser's job is to consume the raw + stream of tokens and output a binding of metavariables to corresponding token + trees. +- `ms` a _matcher_. This is a sequence of token trees that we want to match + `tts` against. + +In the analogy of a regex parser, `tts` is the input and we are matching it +against the pattern `ms`. Using our examples, `tts` could be the stream of +tokens containing the inside of the example invocation `print foo`, while `ms` +might be the sequence of token (trees) `print $mvar:ident`. + +The output of the parser is a `NamedParseResult`, which indicates which of +three cases has occurred: + +- Success: `tts` matches the given matcher `ms`, and we have produced a binding + from metavariables to the corresponding token trees. +- Failure: `tts` does not match `ms`. This results in an error message such as + "No rule expected token _blah_". +- Error: some fatal error has occurred _in the parser_. For example, this + happens if there are more than one pattern match, since that indicates + the macro is ambiguous. + +The full interface is defined [here][code_parse_int]. + +The macro parser does pretty much exactly the same as a normal regex parser with +one exception: in order to parse different types of metavariables, such as +`ident`, `block`, `expr`, etc., the macro parser must sometimes call back to the +normal Rust parser. + +As mentioned above, both definitions and invocations of macros are parsed using +the macro parser. This is extremely non-intuitive and self-referential. The code +to parse macro _definitions_ is in +[`src/libsyntax/ext/tt/macro_rules.rs`][code_mr]. It defines the pattern for +matching for a macro definition as `$( $lhs:tt => $rhs:tt );+`. In other words, +a `macro_rules` definition should have in its body at least one occurrence of a +token tree followed by `=>` followed by another token tree. When the compiler +comes to a `macro_rules` definition, it uses this pattern to match the two token +trees per rule in the definition of the macro _using the macro parser itself_. +In our example definition, the metavariable `$lhs` would match the patterns of +both arms: `(print $mvar:ident)` and `(print twice $mvar:ident)`. And `$rhs` +would match the bodies of both arms: `{ println!("{}", $mvar); }` and `{ +println!("{}", $mvar); println!("{}", $mvar); }`. The parser would keep this +knowledge around for when it needs to expand a macro invocation. + +When the compiler comes to a macro invocation, it parses that invocation using +the same NFA-based macro parser that is described above. 
However, the matcher +used is the first token tree (`$lhs`) extracted from the arms of the macro +_definition_. Using our example, we would try to match the token stream `print +foo` from the invocation against the matchers `print $mvar:ident` and `print +twice $mvar:ident` that we previously extracted from the definition. The +algorithm is exactly the same, but when the macro parser comes to a place in the +current matcher where it needs to match a _non-terminal_ (e.g. `$mvar:ident`), +it calls back to the normal Rust parser to get the contents of that +non-terminal. In this case, the Rust parser would look for an `ident` token, +which it finds (`foo`) and returns to the macro parser. Then, the macro parser +proceeds in parsing as normal. Also, note that exactly one of the matchers from +the various arms should match the invocation; if there is more than one match, +the parse is ambiguous, while if there are no matches at all, there is a syntax +error. + +For more information about the macro parser's implementation, see the comments +in [`src/libsyntax/ext/tt/macro_parser.rs`][code_mp]. + +### Hygiene + +If you have ever used C/C++ preprocessor macros, you know that there are some +annoying and hard-to-debug gotchas! For example, consider the following C code: + +```c +#define DEFINE_FOO struct Bar {int x;}; struct Foo {Bar bar;}; + +// Then, somewhere else +struct Bar { + ... +}; + +DEFINE_FOO +``` + +Most people avoid writing C like this – and for good reason: it doesn't +compile. The `struct Bar` defined by the macro clashes names with the `struct +Bar` defined in the code. Consider also the following example: + +```c +#define DO_FOO(x) {\ + int y = 0;\ + foo(x, y);\ + } + +// Then elsewhere +int y = 22; +DO_FOO(y); +``` + +Do you see the problem? We wanted to generate a call `foo(22, 0)`, but instead +we got `foo(0, 0)` because the macro defined its own `y`! + +These are both examples of _macro hygiene_ issues. _Hygiene_ relates to how to +handle names defined _within a macro_. In particular, a hygienic macro system +prevents errors due to names introduced within a macro. Rust macros are hygienic +in that they do not allow one to write the sorts of bugs above. + +At a high level, hygiene within the rust compiler is accomplished by keeping +track of the context where a name is introduced and used. We can then +disambiguate names based on that context. Future iterations of the macro system +will allow greater control to the macro author to use that context. For example, +a macro author may want to introduce a new name to the context where the macro +was called. Alternately, the macro author may be defining a variable for use +only within the macro (i.e. it should not be visible outside the macro). + +In rustc, this "context" is tracked via `Span`s. + +TODO: what is call-site hygiene? what is def-site hygiene? + +TODO + +### Procedural Macros + +TODO + +### Custom Derive + +TODO + +TODO: maybe something about macros 2.0? 
+
+
+[code_dir]: https://github.com/rust-lang/rust/tree/master/src/libsyntax/ext/tt
+[code_mp]: https://doc.rust-lang.org/nightly/nightly-rustc/syntax/ext/tt/macro_parser/
+[code_mr]: https://doc.rust-lang.org/nightly/nightly-rustc/syntax/ext/tt/macro_rules/
+[code_parse_int]: https://doc.rust-lang.org/nightly/nightly-rustc/syntax/ext/tt/macro_parser/fn.parse.html
+[parsing]: ./the-parser.html
diff --git a/src/doc/rustc-guide/src/method-lookup.md b/src/doc/rustc-guide/src/method-lookup.md
new file mode 100644
index 0000000000..5aafb6abf0
--- /dev/null
+++ b/src/doc/rustc-guide/src/method-lookup.md
@@ -0,0 +1,121 @@
+# Method lookup
+
+Method lookup can be rather complex due to the interaction of a number
+of factors, such as self types, autoderef, trait lookup, etc. This
+file provides an overview of the process. More detailed notes are in
+the code itself, naturally.
+
+One way to think of method lookup is that we convert an expression of
+the form:
+
+```rust,ignore
+receiver.method(...)
+```
+
+into a more explicit [UFCS] form:
+
+```rust,ignore
+Trait::method(ADJ(receiver), ...) // for a trait call
+ReceiverType::method(ADJ(receiver), ...) // for an inherent method call
+```
+
+Here `ADJ` is some kind of adjustment, which is typically a series of
+autoderefs and then possibly an autoref (e.g., `&**receiver`). However
+we sometimes do other adjustments and coercions along the way, in
+particular unsizing (e.g., converting from `[T; n]` to `[T]`).
+
+Method lookup is divided into two major phases:
+
+1. Probing ([`probe.rs`][probe]). The probe phase is when we decide what method
+   to call and how to adjust the receiver.
+2. Confirmation ([`confirm.rs`][confirm]). The confirmation phase "applies"
+   this selection, updating the side-tables, unifying type variables, and
+   otherwise doing side-effectful things.
+
+One reason for this division is to be more amenable to caching. The
+probe phase produces a "pick" (`probe::Pick`), which is designed to be
+cacheable across method-call sites. Therefore, it does not include
+inference variables or other information.
+
+[UFCS]: https://github.com/rust-lang/rfcs/blob/master/text/0132-ufcs.md
+[probe]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_typeck/check/method/probe/
+[confirm]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_typeck/check/method/confirm/
+
+## The Probe phase
+
+### Steps
+
+The first thing that the probe phase does is to create a series of
+*steps*. This is done by progressively dereferencing the receiver type
+until it cannot be deref'd anymore, as well as applying an optional
+"unsize" step. So if the receiver has type `Rc<Box<[T; 3]>>`, this
+might yield:
+
+```rust,ignore
+Rc<Box<[T; 3]>>
+Box<[T; 3]>
+[T; 3]
+[T]
+```
+
+### Candidate assembly
+
+We then search along those steps to create a list of *candidates*. A
+`Candidate` is a method item that might plausibly be the method being
+invoked. For each candidate, we'll derive a "transformed self type"
+that takes into account explicit self.
+
+Candidates are grouped into two kinds, inherent and extension.
+
+**Inherent candidates** are those that are derived from the
+type of the receiver itself. So, if you have a receiver of some
+nominal type `Foo` (e.g., a struct), any methods defined within an
+impl like `impl Foo` are inherent methods. Nothing needs to be
+imported to use an inherent method, they are associated with the type
+itself (note that inherent impls can only be defined in the same
+module as the type itself).
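+
+For instance (a hedged sketch purely for illustration; the type and method
+names here are made up and do not come from the compiler):
+
+```rust
+struct Foo;
+
+impl Foo {
+    // An inherent method: callable on any `Foo` without importing anything,
+    // because the impl is attached to the type itself.
+    fn bar(&self) -> u32 {
+        42
+    }
+}
+```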
+
+FIXME: Inherent candidates are not always derived from impls. If you
+have a trait object, such as a value of type `Box<ToString>`, then the
+trait methods (`to_string()`, in this case) are inherently associated
+with it. Another case is type parameters, in which case the methods of
+their bounds are inherent. However, this part of the rules is subject
+to change: when DST's "impl Trait for Trait" is complete, trait object
+dispatch could be subsumed into trait matching, and the type parameter
+behavior should be reconsidered in light of where clauses.
+
+TODO: Is this FIXME still accurate?
+
+**Extension candidates** are derived from imported traits. If I have
+the trait `ToString` imported, and I call `to_string()` on a value of
+type `T`, then we will go off to find out whether there is an impl of
+`ToString` for `T`. These kinds of method calls are called "extension
+methods". They can be defined in any module, not only the one that
+defined `T`. Furthermore, you must import the trait to call such a
+method.
+
+So, let's continue our example. Imagine that we were calling a method
+`foo` with the receiver `Rc<Box<[T; 3]>>` and there is a trait `Foo`
+that defines it with `&self` for the type `Rc<U>` as well as a method
+on the type `Box` that defines `Foo` but with `&mut self`. Then we
+might have two candidates:
+```text
+&Rc<Box<[T; 3]>> from the impl of `Foo` for `Rc<U>` where `U=Box<[T; 3]>`
+&mut Box<[T; 3]> from the inherent impl on `Box<U>` where `U=[T; 3]`
+```
+
+### Candidate search
+
+Finally, to actually pick the method, we will search down the steps,
+trying to match the receiver type against the candidate types. At
+each step, we also consider an auto-ref and auto-mut-ref to see whether
+that makes any of the candidates match. We pick the first step where
+we find a match.
+
+In the case of our example, the first step is `Rc<Box<[T; 3]>>`,
+which does not itself match any candidate. But when we autoref it, we
+get the type `&Rc<Box<[T; 3]>>` which does match. We would then
+recursively consider all where-clauses that appear on the impl: if
+those match (or we cannot rule out that they do), then this is the
+method we would pick. Otherwise, we would continue down the series of
+steps.
diff --git a/src/doc/rustc-guide/src/mir/construction.md b/src/doc/rustc-guide/src/mir/construction.md
new file mode 100644
index 0000000000..03e220cb45
--- /dev/null
+++ b/src/doc/rustc-guide/src/mir/construction.md
@@ -0,0 +1,150 @@
+# MIR construction
+
+The lowering of [HIR] to [MIR] occurs for the following (probably incomplete)
+list of items:
+
+* Function and Closure bodies
+* Initializers of `static` and `const` items
+* Initializers of enum discriminants
+* Glue and Shims of any kind
+  * Tuple struct initializer functions
+  * Drop code (the `Drop::drop` function is not called directly)
+  * Drop implementations of types without an explicit `Drop` implementation
+
+The lowering is triggered by calling the [`mir_built`] query.
+There is an intermediate representation
+between [HIR] and [MIR] called the [HAIR] that is only used during the lowering.
+The [HAIR]'s most important feature is that the various adjustments (which happen
+without explicit syntax) like coercions, autoderef, autoref and overloaded method
+calls have become explicit casts, deref operations, reference expressions or
+concrete function calls.
+
+The [HAIR] has datatypes that mirror the [HIR] datatypes, but instead of e.g. `-x`
+being a `hair::ExprKind::Neg(hair::Expr)` it is a `hair::ExprKind::Neg(hir::Expr)`.
+This shallowness enables the `HAIR` to represent all datatypes that [HIR] has, but +without having to create an in-memory copy of the entire [HIR]. +[MIR] lowering will first convert the topmost expression from +[HIR] to [HAIR] (in +[https://doc.rust-lang.org/nightly/nightly-rustc/rustc_mir/hair/cx/expr/index.html]) +and then process the [HAIR] expressions recursively. + +The lowering creates local variables for every argument as specified in the signature. +Next it creates local variables for every binding specified (e.g. `(a, b): (i32, String)`) +produces 3 bindings, one for the argument, and two for the bindings. Next it generates +field accesses that read the fields from the argument and writes the value to the binding +variable. + +With this initialization out of the way, the lowering triggers a recursive call +to a function that generates the MIR for the body (a `Block` expression) and +writes the result into the `RETURN_PLACE`. + +## `unpack!` all the things + +Functions that generate MIR tend to fall into one of two patterns. +First, if the function generates only statements, then it will take a +basic block as argument onto which those statements should be appended. +It can then return a result as normal: + +```rust,ignore +fn generate_some_mir(&mut self, block: BasicBlock) -> ResultType { + ... +} +``` + +But there are other functions that may generate new basic blocks as well. +For example, lowering an expression like `if foo { 22 } else { 44 }` +requires generating a small "diamond-shaped graph". +In this case, the functions take a basic block where their code starts +and return a (potentially) new basic block where the code generation ends. +The `BlockAnd` type is used to represent this: + +```rust,ignore +fn generate_more_mir(&mut self, block: BasicBlock) -> BlockAnd { + ... +} +``` + +When you invoke these functions, it is common to have a local variable `block` +that is effectively a "cursor". It represents the point at which we are adding new MIR. +When you invoke `generate_more_mir`, you want to update this cursor. +You can do this manually, but it's tedious: + +```rust,ignore +let mut block; +let v = match self.generate_more_mir(..) { + BlockAnd { block: new_block, value: v } => { + block = new_block; + v + } +}; +``` + +For this reason, we offer a macro that lets you write +`let v = unpack!(block = self.generate_more_mir(...))`. +It simply extracts the new block and overwrites the +variable `block` that you named in the `unpack!`. + +## Lowering expressions into the desired MIR + +There are essentially four kinds of representations one might want of an expression: + +* `Place` refers to a (or part of a) preexisting memory location (local, static, promoted) +* `Rvalue` is something that can be assigned to a `Place` +* `Operand` is an argument to e.g. a `+` operation or a function call +* a temporary variable containing a copy of the value + +We start out with lowering the function body to an `Rvalue` so we can create an +assignment to `RETURN_PLACE`, This `Rvalue` lowering will in turn trigger lowering to +`Operand` for its arguments (if any). `Operand` lowering either produces a `const` +operand, or moves/copies out of a `Place`, thus triggering a `Place` lowering. An +expression being lowered to a `Place` can in turn trigger a temporary to be created +if the expression being lowered contains operations. This is where the snake bites its +own tail and we need to trigger an `Rvalue` lowering for the expression to be written +into the local. 
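+
+As a rough illustration of these four representations (hand-written, MIR-like
+pseudocode rather than actual compiler output), a statement such as
+`let x = a + b;` could end up as:
+
+```mir
+// `_3` is the Place being assigned, `Add(..)` is the Rvalue, and the two
+// `copy` expressions are Operands reading the Places that hold `a` and `b`.
+_3 = Add(copy _1, copy _2);
+```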
+ +## Operator lowering + +Operators on builtin types are not lowered to function calls (which would end up being +infinite recursion calls, because the trait impls just contain the operation itself +again). Instead there are `Rvalue`s for binary and unary operators and index operations. +These `Rvalue`s later get codegened to llvm primitive operations or llvm intrinsics. + +Operators on all other types get lowered to a function call to their `impl` of the +operator's corresponding trait. + +Regardless of the lowering kind, the arguments to the operator are lowered to `Operand`s. +This means all arguments are either constants, or refer to an already existing value +somewhere in a local or static. + +## Method call lowering + +Method calls are lowered to the same `TerminatorKind` that function calls are. +In [MIR] there is no difference between method calls and function calls anymore. + +## Conditions + +`if` conditions and `match` statements for `enum`s without variants with fields are +lowered to `TerminatorKind::SwitchInt`. Each possible value (so `0` and `1` for `if` +conditions) has a corresponding `BasicBlock` to which the code continues. +The argument being branched on is (again) an `Operand` representing the value of +the if condition. + +### Pattern matching + +`match` statements for `enum`s with variants that have fields are lowered to +`TerminatorKind::SwitchInt`, too, but the `Operand` refers to a `Place` where the +discriminant of the value can be found. This often involves reading the discriminant +to a new temporary variable. + +## Aggregate construction + +Aggregate values of any kind (e.g. structs or tuples) are built via `Rvalue::Aggregate`. +All fields are +lowered to `Operator`s. This is essentially equivalent to one assignment +statement per aggregate field plus an assignment to the discriminant in the +case of `enum`s. + +[MIR]: ./index.html +[HIR]: ../hir.html +[HAIR]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_mir/hair/index.html +[`mir_built`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_mir/transform/fn.mir_built.html diff --git a/src/doc/rustc-guide/src/mir/index.md b/src/doc/rustc-guide/src/mir/index.md new file mode 100644 index 0000000000..baaf23c369 --- /dev/null +++ b/src/doc/rustc-guide/src/mir/index.md @@ -0,0 +1,247 @@ +# The MIR (Mid-level IR) + +MIR is Rust's _Mid-level Intermediate Representation_. It is +constructed from [HIR](../hir.html). MIR was introduced in +[RFC 1211]. It is a radically simplified form of Rust that is used for +certain flow-sensitive safety checks – notably the borrow checker! – +and also for optimization and code generation. + +If you'd like a very high-level introduction to MIR, as well as some +of the compiler concepts that it relies on (such as control-flow +graphs and desugaring), you may enjoy the +[rust-lang blog post that introduced MIR][blog]. + +[blog]: https://blog.rust-lang.org/2016/04/19/MIR.html + +## Introduction to MIR + +MIR is defined in the [`src/librustc/mir/`][mir] module, but much of the code +that manipulates it is found in [`src/librustc_mir`][mirmanip]. + +[RFC 1211]: http://rust-lang.github.io/rfcs/1211-mir.html + +Some of the key characteristics of MIR are: + +- It is based on a [control-flow graph][cfg]. +- It does not have nested expressions. +- All types in MIR are fully explicit. 
+
+[cfg]: ../appendix/background.html#cfg
+
+## Key MIR vocabulary
+
+This section introduces the key concepts of MIR, summarized here:
+
+- **Basic blocks**: units of the control-flow graph, consisting of:
+  - **statements:** actions with one successor
+  - **terminators:** actions with potentially multiple successors; always at
+    the end of a block
+  - (if you're not familiar with the term *basic block*, see the [background
+    chapter][cfg])
+- **Locals:** Memory locations allocated on the stack (conceptually, at
+  least), such as function arguments, local variables, and
+  temporaries. These are identified by an index, written with a
+  leading underscore, like `_1`. There is also a special "local"
+  (`_0`) allocated to store the return value.
+- **Places:** expressions that identify a location in memory, like `_1` or
+  `_1.f`.
+- **Rvalues:** expressions that produce a value. The "R" stands for
+  the fact that these are the "right-hand side" of an assignment.
+  - **Operands:** the arguments to an rvalue, which can either be a
+    constant (like `22`) or a place (like `_1`).
+
+You can get a feeling for how MIR is structured by translating simple
+programs into MIR and reading the pretty printed output. In fact, the
+playground makes this easy, since it supplies a MIR button that will
+show you the MIR for your program. Try putting this program into play
+(or [clicking on this link][sample-play]), and then clicking the "MIR"
+button on the top:
+
+[sample-play]: https://play.rust-lang.org/?gist=30074856e62e74e91f06abd19bd72ece&version=stable
+
+```rust
+fn main() {
+    let mut vec = Vec::new();
+    vec.push(1);
+    vec.push(2);
+}
+```
+
+You should see something like:
+
+```mir
+// WARNING: This output format is intended for human consumers only
+// and is subject to change without notice. Knock yourself out.
+fn main() -> () {
+    ...
+}
+```
+
+This is the MIR format for the `main` function.
+
+**Variable declarations.** If we drill in a bit, we'll see it begins
+with a bunch of variable declarations. They look like this:
+
+```mir
+let mut _0: ();                      // return place
+scope 1 {
+    let mut _1: std::vec::Vec<i32>;  // "vec" in scope 1 at src/main.rs:2:9: 2:16
+}
+scope 2 {
+}
+let mut _2: ();
+let mut _3: &mut std::vec::Vec<i32>;
+let mut _4: ();
+let mut _5: &mut std::vec::Vec<i32>;
+```
+
+You can see that variables in MIR don't have names, they have indices,
+like `_0` or `_1`. We also intermingle the user's variables (e.g.,
+`_1`) with temporary values (e.g., `_2` or `_3`). You can tell the
+user-defined variables apart because they have a comment that gives
+you their original name (`// "vec" in scope 1...`). The "scope" blocks
+(e.g., `scope 1 { .. }`) describe the lexical structure of the source
+program (which names were in scope when).
+
+**Basic blocks.** Reading further, we see our first **basic block** (naturally
+it may look slightly different when you view it, and I am ignoring some of the
+comments):
+
+```mir
+bb0: {
+    StorageLive(_1);
+    _1 = const <std::vec::Vec<i32>>::new() -> bb2;
+}
+```
+
+A basic block is defined by a series of **statements** and a final
+**terminator**. In this case, there is one statement:
+
+```mir
+StorageLive(_1);
+```
+
+This statement indicates that the variable `_1` is "live", meaning
+that it may be used later – this will persist until we encounter a
+`StorageDead(_1)` statement, which indicates that the variable `_1` is
+done being used. These "storage statements" are used by LLVM to
+allocate stack space.
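+
+(Later in the function, once `_1` is no longer needed, the pretty-printed MIR
+would contain the matching cleanup, roughly:)
+
+```mir
+StorageDead(_1);
+```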
+
+The **terminator** of the block `bb0` is the call to `Vec::new`:
+
+```mir
+_1 = const <std::vec::Vec<i32>>::new() -> bb2;
+```
+
+Terminators are different from statements because they can have more
+than one successor – that is, control may flow to different
+places. Function calls like the call to `Vec::new` are always
+terminators because of the possibility of unwinding, although in the
+case of `Vec::new` we are able to see that indeed unwinding is not
+possible, and hence we list only one successor block, `bb2`.
+
+If we look ahead to `bb2`, we will see it looks like this:
+
+```mir
+bb2: {
+    StorageLive(_3);
+    _3 = &mut _1;
+    _2 = const <std::vec::Vec<i32>>::push(move _3, const 1i32) -> [return: bb3, unwind: bb4];
+}
+```
+
+Here there are two statements: another `StorageLive`, introducing the `_3`
+temporary, and then an assignment:
+
+```mir
+_3 = &mut _1;
+```
+
+Assignments in general have the form:
+
+```text
+<Place> = <Rvalue>
+```
+
+A place is an expression like `_3`, `_3.f` or `*_3` – it denotes a
+location in memory. An **Rvalue** is an expression that creates a
+value: in this case, the rvalue is a mutable borrow expression, which
+looks like `&mut <Place>`. So we can kind of define a grammar for
+rvalues like so:
+
+```text
+<Rvalue>  = & (mut)? <Place>
+           | <Operand> + <Operand>
+           | <Operand> - <Operand>
+           | ...
+
+<Operand> = Constant
+          | copy Place
+          | move Place
+```
+
+As you can see from this grammar, rvalues cannot be nested – they can
+only reference places and constants. Moreover, when you use a place,
+we indicate whether we are **copying it** (which requires that the
+place have a type `T` where `T: Copy`) or **moving it** (which works
+for a place of any type). So, for example, if we had the expression
+`x = a + b + c` in Rust, that would get compiled to two statements and a
+temporary:
+
+```mir
+TMP1 = a + b
+x = TMP1 + c
+```
+
+([Try it and see][play-abc], though you may want to do release mode to skip
+over the overflow checks.)
+
+[play-abc]: https://play.rust-lang.org/?gist=1751196d63b2a71f8208119e59d8a5b6&version=stable
+
+## MIR data types
+
+The MIR data types are defined in the [`src/librustc/mir/`][mir]
+module. Each of the key concepts mentioned in the previous section
+maps in a fairly straightforward way to a Rust type.
+
+The main MIR data type is `Mir`. It contains the data for a single
+function (along with sub-instances of Mir for "promoted constants",
+but [you can read about those below](#promoted)).
+
+- **Basic blocks**: The basic blocks are stored in the field
+  `basic_blocks`; this is a vector of `BasicBlockData`
+  structures. Nobody ever references a basic block directly: instead,
+  we pass around `BasicBlock` values, which are
+  [newtype'd] indices into this vector.
+- **Statements** are represented by the type `Statement`.
+- **Terminators** are represented by the type `Terminator`.
+- **Locals** are represented by a [newtype'd] index type `Local`. The
+  data for a local variable is found in the `Mir` (the `local_decls`
+  vector). There is also a special constant `RETURN_PLACE` identifying
+  the special "local" representing the return value.
+- **Places** are identified by the enum `Place`. There are a few variants:
+  - Local variables like `_1`
+  - Static variables `FOO`
+  - **Projections**, which are fields or other things that "project
+    out" from a base place. So e.g. the place `_1.f` is a projection,
+    with `f` being the "projection element" and `_1` being the base
+    path. `*_1` is also a projection, with the `*` being represented
+    by the `ProjectionElem::Deref` element.
+- **Rvalues** are represented by the enum `Rvalue`.
+- **Operands** are represented by the enum `Operand`. + +## Representing constants + +*to be written* + + + +### Promoted constants + +*to be written* + + +[mir]: https://github.com/rust-lang/rust/tree/master/src/librustc/mir +[mirmanip]: https://github.com/rust-lang/rust/tree/master/src/librustc_mir +[mir]: https://github.com/rust-lang/rust/tree/master/src/librustc/mir +[newtype'd]: ../appendix/glossary.html diff --git a/src/doc/rustc-guide/src/mir/optimizations.md b/src/doc/rustc-guide/src/mir/optimizations.md new file mode 100644 index 0000000000..ddafa0c99c --- /dev/null +++ b/src/doc/rustc-guide/src/mir/optimizations.md @@ -0,0 +1 @@ +# MIR optimizations diff --git a/src/doc/rustc-guide/src/mir/passes.md b/src/doc/rustc-guide/src/mir/passes.md new file mode 100644 index 0000000000..7dc1249a0f --- /dev/null +++ b/src/doc/rustc-guide/src/mir/passes.md @@ -0,0 +1,177 @@ +# MIR passes + +If you would like to get the MIR for a function (or constant, etc), +you can use the `optimized_mir(def_id)` query. This will give you back +the final, optimized MIR. For foreign def-ids, we simply read the MIR +from the other crate's metadata. But for local def-ids, the query will +construct the MIR and then iteratively optimize it by applying a +series of passes. This section describes how those passes work and how +you can extend them. + +To produce the `optimized_mir(D)` for a given def-id `D`, the MIR +passes through several suites of optimizations, each represented by a +query. Each suite consists of multiple optimizations and +transformations. These suites represent useful intermediate points +where we want to access the MIR for type checking or other purposes: + +- `mir_build(D)` – not a query, but this constructs the initial MIR +- `mir_const(D)` – applies some simple transformations to make MIR ready for + constant evaluation; +- `mir_validated(D)` – applies some more transformations, making MIR ready for + borrow checking; +- `optimized_mir(D)` – the final state, after all optimizations have been + performed. + +### Seeing how the MIR changes as the compiler executes + +`-Zdump-mir=F` is a handy compiler options that will let you view the MIR for +each function at each stage of compilation. `-Zdump-mir` takes a **filter** `F` +which allows you to control which functions and which passes you are +interesting in. For example: + +```bash +> rustc -Zdump-mir=foo ... +``` + +This will dump the MIR for any function whose name contains `foo`; it +will dump the MIR both before and after every pass. Those files will +be created in the `mir_dump` directory. There will likely be quite a +lot of them! + +```bash +> cat > foo.rs +fn main() { + println!("Hello, world!"); +} +^D +> rustc -Zdump-mir=main foo.rs +> ls mir_dump/* | wc -l + 161 +``` + +The files have names like `rustc.main.000-000.CleanEndRegions.after.mir`. These +names have a number of parts: + +```text +rustc.main.000-000.CleanEndRegions.after.mir + ---- --- --- --------------- ----- either before or after + | | | name of the pass + | | index of dump within the pass (usually 0, but some passes dump intermediate states) + | index of the pass + def-path to the function etc being dumped +``` + +You can also make more selective filters. 
For example, `main & CleanEndRegions` +will select for things that reference *both* `main` and the pass +`CleanEndRegions`: + +```bash +> rustc -Zdump-mir='main & CleanEndRegions' foo.rs +> ls mir_dump +rustc.main.000-000.CleanEndRegions.after.mir rustc.main.000-000.CleanEndRegions.before.mir +``` + +Filters can also have `|` parts to combine multiple sets of +`&`-filters. For example `main & CleanEndRegions | main & +NoLandingPads` will select *either* `main` and `CleanEndRegions` *or* +`main` and `NoLandingPads`: + +```bash +> rustc -Zdump-mir='main & CleanEndRegions | main & NoLandingPads' foo.rs +> ls mir_dump +rustc.main-promoted[0].002-000.NoLandingPads.after.mir +rustc.main-promoted[0].002-000.NoLandingPads.before.mir +rustc.main-promoted[0].002-006.NoLandingPads.after.mir +rustc.main-promoted[0].002-006.NoLandingPads.before.mir +rustc.main-promoted[1].002-000.NoLandingPads.after.mir +rustc.main-promoted[1].002-000.NoLandingPads.before.mir +rustc.main-promoted[1].002-006.NoLandingPads.after.mir +rustc.main-promoted[1].002-006.NoLandingPads.before.mir +rustc.main.000-000.CleanEndRegions.after.mir +rustc.main.000-000.CleanEndRegions.before.mir +rustc.main.002-000.NoLandingPads.after.mir +rustc.main.002-000.NoLandingPads.before.mir +rustc.main.002-006.NoLandingPads.after.mir +rustc.main.002-006.NoLandingPads.before.mir +``` + +(Here, the `main-promoted[0]` files refer to the MIR for "promoted constants" +that appeared within the `main` function.) + +### Implementing and registering a pass + +A `MirPass` is some bit of code that processes the MIR, typically – +but not always – transforming it along the way somehow. For example, +it might perform an optimization. The `MirPass` trait itself is found +in in [the `rustc_mir::transform` module][mirtransform], and it +basically consists of one method, `run_pass`, that simply gets an +`&mut Mir` (along with the tcx and some information about where it +came from). The MIR is therefore modified in place (which helps to +keep things efficient). + +A good example of a basic MIR pass is [`NoLandingPads`], which walks +the MIR and removes all edges that are due to unwinding – this is +used when configured with `panic=abort`, which never unwinds. As you +can see from its source, a MIR pass is defined by first defining a +dummy type, a struct with no fields, something like: + +```rust +struct MyPass; +``` + +for which you then implement the `MirPass` trait. You can then insert +this pass into the appropriate list of passes found in a query like +`optimized_mir`, `mir_validated`, etc. (If this is an optimization, it +should go into the `optimized_mir` list.) + +If you are writing a pass, there's a good chance that you are going to +want to use a [MIR visitor]. MIR visitors are a handy way to walk all +the parts of the MIR, either to search for something or to make small +edits. + +### Stealing + +The intermediate queries `mir_const()` and `mir_validated()` yield up +a `&'tcx Steal>`, allocated using +`tcx.alloc_steal_mir()`. This indicates that the result may be +**stolen** by the next suite of optimizations – this is an +optimization to avoid cloning the MIR. Attempting to use a stolen +result will cause a panic in the compiler. Therefore, it is important +that you do not read directly from these intermediate queries except as +part of the MIR processing pipeline. 
+ +Because of this stealing mechanism, some care must also be taken to +ensure that, before the MIR at a particular phase in the processing +pipeline is stolen, anyone who may want to read from it has already +done so. Concretely, this means that if you have some query `foo(D)` +that wants to access the result of `mir_const(D)` or +`mir_validated(D)`, you need to have the successor pass "force" +`foo(D)` using `ty::queries::foo::force(...)`. This will force a query +to execute even though you don't directly require its result. + +As an example, consider MIR const qualification. It wants to read the +result produced by the `mir_const()` suite. However, that result will +be **stolen** by the `mir_validated()` suite. If nothing was done, +then `mir_const_qualif(D)` would succeed if it came before +`mir_validated(D)`, but fail otherwise. Therefore, `mir_validated(D)` +will **force** `mir_const_qualif` before it actually steals, thus +ensuring that the reads have already happened (remember that +[queries are memoized](../query.html), so executing a query twice +simply loads from a cache the second time): + +```text +mir_const(D) --read-by--> mir_const_qualif(D) + | ^ + stolen-by | + | (forces) + v | +mir_validated(D) ------------+ +``` + +This mechanism is a bit dodgy. There is a discussion of more elegant +alternatives in [rust-lang/rust#41710]. + +[rust-lang/rust#41710]: https://github.com/rust-lang/rust/issues/41710 +[mirtransform]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_mir/transform/ +[`NoLandingPads`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_mir/transform/no_landing_pads/struct.NoLandingPads.html +[MIR visitor]: ./visitor.html diff --git a/src/doc/rustc-guide/src/mir/visitor.md b/src/doc/rustc-guide/src/mir/visitor.md new file mode 100644 index 0000000000..ad00bc3f16 --- /dev/null +++ b/src/doc/rustc-guide/src/mir/visitor.md @@ -0,0 +1,55 @@ +# MIR visitor + +The MIR visitor is a convenient tool for traversing the MIR and either +looking for things or making changes to it. The visitor traits are +defined in [the `rustc::mir::visit` module][m-v] – there are two of +them, generated via a single macro: `Visitor` (which operates on a +`&Mir` and gives back shared references) and `MutVisitor` (which +operates on a `&mut Mir` and gives back mutable references). + +[m-v]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc/mir/visit/index.html + +To implement a visitor, you have to create a type that represents +your visitor. Typically, this type wants to "hang on" to whatever +state you will need while processing MIR: + +```rust,ignore +struct MyVisitor<...> { + tcx: TyCtxt<'cx, 'tcx, 'tcx>, + ... +} +``` + +and you then implement the `Visitor` or `MutVisitor` trait for that type: + +```rust,ignore +impl<'tcx> MutVisitor<'tcx> for NoLandingPads { + fn visit_foo(&mut self, ...) { + ... + self.super_foo(...); + } +} +``` + +As shown above, within the impl, you can override any of the +`visit_foo` methods (e.g., `visit_terminator`) in order to write some +code that will execute whenever a `foo` is found. If you want to +recursively walk the contents of the `foo`, you then invoke the +`super_foo` method. (NB. You never want to override `super_foo`.) + +A very simple example of a visitor can be found in [`NoLandingPads`]. +That visitor doesn't even require any state: it just visits all +terminators and removes their `unwind` successors. 
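+
+For example, a visitor that merely counts the terminators it encounters might
+look roughly like this (a sketch; the exact `visit_terminator` signature in
+`rustc::mir::visit` may differ between compiler versions):
+
+```rust,ignore
+struct TerminatorCounter {
+    count: usize,
+}
+
+impl<'tcx> Visitor<'tcx> for TerminatorCounter {
+    fn visit_terminator(&mut self,
+                        block: BasicBlock,
+                        terminator: &Terminator<'tcx>,
+                        location: Location) {
+        // Record the terminator, then use `super_terminator` to keep walking
+        // its contents (the `super_*` methods themselves are never overridden).
+        self.count += 1;
+        self.super_terminator(block, terminator, location);
+    }
+}
+```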
+ +[`NoLandingPads`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_mir/transform/no_landing_pads/struct.NoLandingPads.html + +## Traversal + +In addition the visitor, [the `rustc::mir::traversal` module][t] +contains useful functions for walking the MIR CFG in +[different standard orders][traversal] (e.g. pre-order, reverse +post-order, and so forth). + +[t]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc/mir/traversal/index.html +[traversal]: https://en.wikipedia.org/wiki/Tree_traversal + diff --git a/src/doc/rustc-guide/src/miri.md b/src/doc/rustc-guide/src/miri.md new file mode 100644 index 0000000000..a3c7b3ff4a --- /dev/null +++ b/src/doc/rustc-guide/src/miri.md @@ -0,0 +1,142 @@ +# Miri + +Miri (**MIR** **I**nterpreter) is a virtual machine for executing MIR without +compiling to machine code. It is usually invoked via `tcx.const_eval`. + +If you start out with a constant + +```rust +const FOO: usize = 1 << 12; +``` + +rustc doesn't actually invoke anything until the constant is either used or +placed into metadata. + +Once you have a use-site like + +```rust,ignore +type Foo = [u8; FOO - 42]; +``` + +The compiler needs to figure out the length of the array before being able to +create items that use the type (locals, constants, function arguments, ...). + +To obtain the (in this case empty) parameter environment, one can call +`let param_env = tcx.param_env(length_def_id);`. The `GlobalId` needed is + +```rust,ignore +let gid = GlobalId { + promoted: None, + instance: Instance::mono(length_def_id), +}; +``` + +Invoking `tcx.const_eval(param_env.and(gid))` will now trigger the creation of +the MIR of the array length expression. The MIR will look something like this: + +```mir +const Foo::{{initializer}}: usize = { + let mut _0: usize; // return pointer + let mut _1: (usize, bool); + + bb0: { + _1 = CheckedSub(const Unevaluated(FOO, Slice([])), const 42usize); + assert(!(_1.1: bool), "attempt to subtract with overflow") -> bb1; + } + + bb1: { + _0 = (_1.0: usize); + return; + } +} +``` + +Before the evaluation, a virtual memory location (in this case essentially a +`vec![u8; 4]` or `vec![u8; 8]`) is created for storing the evaluation result. + +At the start of the evaluation, `_0` and `_1` are +`Value::ByVal(PrimVal::Undef)`. When the initialization of `_1` is invoked, the +value of the `FOO` constant is required, and triggers another call to +`tcx.const_eval`, which will not be shown here. If the evaluation of FOO is +successful, 42 will be subtracted by its value `4096` and the result stored in +`_1` as `Value::ByValPair(PrimVal::Bytes(4054), PrimVal::Bytes(0))`. The first +part of the pair is the computed value, the second part is a bool that's true if +an overflow happened. + +The next statement asserts that said boolean is `0`. In case the assertion +fails, its error message is used for reporting a compile-time error. + +Since it does not fail, `Value::ByVal(PrimVal::Bytes(4054))` is stored in the +virtual memory was allocated before the evaluation. `_0` always refers to that +location directly. + +After the evaluation is done, the virtual memory allocation is interned into the +`TyCtxt`. Future evaluations of the same constants will not actually invoke +miri, but just extract the value from the interned allocation. + +The `tcx.const_eval` function has one additional feature: it will not return a +`ByRef(interned_allocation_id)`, but a `ByVal(computed_value)` if possible. 
This +makes using the result much more convenient, as no further queries need to be +executed in order to get at something as simple as a `usize`. + +## Datastructures + +Miri's core datastructures can be found in +[librustc/mir/interpret](https://github.com/rust-lang/rust/blob/master/src/librustc/mir/interpret). +This is mainly the error enum and the `Value` and `PrimVal` types. A `Value` can +be either `ByVal` (a single `PrimVal`), `ByValPair` (two `PrimVal`s, usually fat +pointers or two element tuples) or `ByRef`, which is used for anything else and +refers to a virtual allocation. These allocations can be accessed via the +methods on `tcx.interpret_interner`. + +If you are expecting a numeric result, you can use `unwrap_u64` (panics on +anything that can't be representad as a `u64`) or `to_raw_bits` which results +in an `Option` yielding the `ByVal` if possible. + +## Allocations + +A miri allocation is either a byte sequence of the memory or an `Instance` in +the case of function pointers. Byte sequences can additionally contain +relocations that mark a group of bytes as a pointer to another allocation. The +actual bytes at the relocation refer to the offset inside the other allocation. + +These allocations exist so that references and raw pointers have something to +point to. There is no global linear heap in which things are allocated, but each +allocation (be it for a local variable, a static or a (future) heap allocation) +gets its own little memory with exactly the required size. So if you have a +pointer to an allocation for a local variable `a`, there is no possible (no +matter how unsafe) operation that you can do that would ever change said pointer +to a pointer to `b`. + +## Interpretation + +Although the main entry point to constant evaluation is the `tcx.const_eval` +query, there are additional functions in +[librustc_mir/const_eval.rs](https://doc.rust-lang.org/nightly/nightly-rustc/rustc_mir/const_eval/index.html) +that allow accessing the fields of a `Value` (`ByRef` or otherwise). You should +never have to access an `Allocation` directly except for translating it to the +compilation target (at the moment just LLVM). + +Miri starts by creating a virtual stack frame for the current constant that is +being evaluated. There's essentially no difference between a constant and a +function with no arguments, except that constants do not allow local (named) +variables at the time of writing this guide. + +A stack frame is defined by the `Frame` type in +[librustc_mir/interpret/eval_context.rs](https://github.com/rust-lang/rust/blob/master/src/librustc_mir/interpret/eval_context.rs) +and contains all the local +variables memory (`None` at the start of evaluation). Each frame refers to the +evaluation of either the root constant or subsequent calls to `const fn`. The +evaluation of another constant simply calls `tcx.const_eval`, which produces an +entirely new and independent stack frame. + +The frames are just a `Vec`, there's no way to actually refer to a +`Frame`'s memory even if horrible shenigans are done via unsafe code. The only +memory that can be referred to are `Allocation`s. + +Miri now calls the `step` method (in +[librustc_mir/interpret/step.rs](https://github.com/rust-lang/rust/blob/master/src/librustc_mir/interpret/step.rs) +) until it either returns an error or has no further statements to execute. Each +statement will now initialize or modify the locals or the virtual memory +referred to by a local. 
This might require evaluating other constants or +statics, which just recursively invokes `tcx.const_eval`. diff --git a/src/doc/rustc-guide/src/name-resolution.md b/src/doc/rustc-guide/src/name-resolution.md new file mode 100644 index 0000000000..3117217e5e --- /dev/null +++ b/src/doc/rustc-guide/src/name-resolution.md @@ -0,0 +1,119 @@ +# Name resolution + +The name resolution is a two-phase process. In the first phase, which runs +during macro expansion, we build a tree of modules and resolve imports. Macro +expansion and name resolution communicate with each other via the `Resolver` +trait, defined in `libsyntax`. + +The input to the second phase is the syntax tree, produced by parsing input +files and expanding macros. This phase produces links from all the names in the +source to relevant places where the name was introduced. It also generates +helpful error messages, like typo suggestions, traits to import or lints about +unused items. + +A successful run of the second phase (`Resolver::resolve_crate`) creates kind +of an index the rest of the compilation may use to ask about the present names +(through the `hir::lowering::Resolver` interface). + +The name resolution lives in the `librustc_resolve` crate, with the meat in +`lib.rs` and some helpers or symbol-type specific logic in the other modules. + +## Namespaces + +Different kind of symbols live in different namespaces ‒ eg. types don't +clash with variables. This usually doesn't happen, because variables start with +lower-case letter while types with upper case one, but this is only a +convention. This is legal Rust code that'll compile (with warnings): + +```rust +type x = u32; +let x: x = 1; +let y: x = 2; // See? x is still a type here. +``` + +To cope with this, and with slightly different scoping rules for these +namespaces, the resolver keeps them separated and builds separate structures for +them. + +In other words, when the code talks about namespaces, it doesn't mean the module +hierarchy, it's types vs. values vs. macros. + +## Scopes and ribs + +A name is visible only in certain area in the source code. This forms a +hierarchical structure, but not necessarily a simple one ‒ if one scope is +part of another, it doesn't mean the name visible in the outer one is also +visible in the inner one, or that it refers to the same thing. + +To cope with that, the compiler introduces the concept of Ribs. This is +abstraction of a scope. Every time the set of visible names potentially changes, +a new rib is pushed onto a stack. The places where this can happen includes for +example: + +* The obvious places ‒ curly braces enclosing a block, function boundaries, + modules. +* Introducing a let binding ‒ this can shadow another binding with the same + name. +* Macro expansion border ‒ to cope with macro hygiene. + +When searching for a name, the stack of ribs is traversed from the innermost +outwards. This helps to find the closest meaning of the name (the one not +shadowed by anything else). The transition to outer rib may also change the +rules what names are usable ‒ if there are nested functions (not closures), +the inner one can't access parameters and local bindings of the outer one, +even though they should be visible by ordinary scoping rules. 
An example: + +```rust +fn do_something(val: T) { // <- New rib in both types and values (1) + // `val` is accessible, as is the helper function + // `T` is accessible + let helper = || { // New rib on `helper` (2) and another on the block (3) + // `val` is accessible here + }; // End of (3) + // `val` is accessible, `helper` variable shadows `helper` function + fn helper() { // <- New rib in both types and values (4) + // `val` is not accessible here, (4) is not transparent for locals) + // `T` is not accessible here + } // End of (4) + let val = T::default(); // New rib (5) + // `val` is the variable, not the parameter here +} // End of (5), (2) and (1) +``` + +Because the rules for different namespaces are a bit different, each namespace +has its own independent rib stack that is constructed in parallel to the others. +In addition, there's also a rib stack for local labels (eg. names of loops or +blocks), which isn't a full namespace in its own right. + +## Overall strategy + +To perform the name resolution of the whole crate, the syntax tree is traversed +top-down and every encountered name is resolved. This works for most kinds of +names, because at the point of use of a name it is already introduced in the Rib +hierarchy. + +There are some exceptions to this. Items are bit tricky, because they can be +used even before encountered ‒ therefore every block needs to be first scanned +for items to fill in its Rib. + +Other, even more problematic ones, are imports which need recursive fixed-point +resolution and macros, that need to be resolved and expanded before the rest of +the code can be processed. + +Therefore, the resolution is performed in multiple stages. + +## TODO: + +This is a result of the first pass of learning the code. It is definitely +incomplete and not detailed enough. It also might be inaccurate in places. +Still, it probably provides useful first guidepost to what happens in there. + +* What exactly does it link to and how is that published and consumed by + following stages of compilation? +* Who calls it and how it is actually used. +* Is it a pass and then the result is only used, or can it be computed + incrementally (eg. for RLS)? +* The overall strategy description is a bit vague. +* Where does the name `Rib` come from? +* Does this thing have its own tests, or is it tested only as part of some e2e + testing? diff --git a/src/doc/rustc-guide/src/param_env.md b/src/doc/rustc-guide/src/param_env.md new file mode 100644 index 0000000000..2474886efe --- /dev/null +++ b/src/doc/rustc-guide/src/param_env.md @@ -0,0 +1,30 @@ +# Parameter Environment + +When working with associated and/or or generic items (types, constants, +functions/methods) it is often relevant to have more information about the +`Self` or generic parameters. Trait bounds and similar information is encoded in +the `ParamEnv`. Often this is not enough information to obtain things like the +type's `Layout`, but you can do all kinds of other checks on it (e.g. whether a +type implements `Copy`) or you can evaluate an associated constant whose value +does not depend on anything from the parameter environment. + +For example if you have a function + +```rust +fn foo(t: T) { +} +``` + +the parameter environment for that function is `[T: Copy]`. This means any +evaluation within this function will, when accessing the type `T`, know about +its `Copy` bound via the parameter environment. 
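+
+In compiler code this might look roughly as follows (a sketch; `foo_def_id`
+is a placeholder for the `DefId` of a function like the `fn foo<T: Copy>(t: T)`
+above):
+
+```rust,ignore
+// The returned environment records the bounds the caller must provide,
+// here essentially `[T: Copy]`.
+let param_env = tcx.param_env(foo_def_id);
+```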
+ +Although you can obtain a valid `ParamEnv` for any item via +`tcx.param_env(def_id)`, this `ParamEnv` can be too generic for your use case. +Using the `ParamEnv` from the surrounding context can allow you to evaluate more +things. + +Another great thing about `ParamEnv` is that you can use it to bundle the thing +depending on generic parameters (e.g. a `Ty`) by calling `param_env.and(ty)`. +This will produce a `ParamEnvAnd`, making clear that you should probably not +be using the inner value without taking care to also use the `ParamEnv`. diff --git a/src/doc/rustc-guide/src/profiling.md b/src/doc/rustc-guide/src/profiling.md new file mode 100644 index 0000000000..f6771b07bb --- /dev/null +++ b/src/doc/rustc-guide/src/profiling.md @@ -0,0 +1,9 @@ +# Profiling the compiler + +This discussion talks about how profile the compiler and find out +where it spends its time. If you just want to get a general overview, +it is often a good idea to just add `-Zself-profile` option to the +rustc command line. This will break down time spent into various +categories. But if you want a more detailed look, you probably want +to break out a custom profiler. + diff --git a/src/doc/rustc-guide/src/profiling/with_perf.md b/src/doc/rustc-guide/src/profiling/with_perf.md new file mode 100644 index 0000000000..2588994002 --- /dev/null +++ b/src/doc/rustc-guide/src/profiling/with_perf.md @@ -0,0 +1,331 @@ +# Profiling with perf + +This is a guide for how to profile rustc with [perf](https://perf.wiki.kernel.org/index.php/Main_Page). + +## Initial steps + +- Get a clean checkout of rust-lang/master, or whatever it is you want + to profile. +- Set the following settings in your `config.toml`: + - `debuginfo-lines = true` + - `use-jemalloc = false` — lets you do memory use profiling with valgrind + - leave everything else the defaults +- Run `./x.py build` to get a full build +- Make a rustup toolchain pointing to that result + - see [the "build and run" section for instructions][b-a-r] + +[b-a-r]: ../how-to-build-and-run.html#toolchain + +## Gathering a perf profile + +perf is an excellent tool on linux that can be used to gather and +analyze all kinds of information. Mostly it is used to figure out +where a program spends its time. It can also be used for other sorts +of events, though, like cache misses and so forth. + +### The basics + +The basic `perf` command is this: + +```bash +> perf record -F99 --call-graph dwarf XXX +``` + +The `-F99` tells perf to sample at 99 Hz, which avoids generating too +much data for longer runs (why 99 Hz you ask? It is often chosen +because it is unlikely to be in lockstep with other periodic +activity). The `--call-graph dwarf` tells perf to get call-graph +information from debuginfo, which is accurate. The `XXX` is the +command you want to profile. So, for example, you might do: + +```bash +> perf record -F99 --call-graph dwarf cargo + rustc +``` + +to run `cargo` -- here `` should be the name of the toolchain +you made in the beginning. But there are some things to be aware of: + +- You probably don't want to profile the time spend building + dependencies. So something like `cargo build; cargo clean -p $C` may + be helpful (where `$C` is the crate name) + - Though usually I just do `touch src/lib.rs` and rebuild instead. =) +- You probably don't want incremental messing about with your + profile. So something like `CARGO_INCREMENTAL=0` can be helpful. 
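+
+Putting those pieces together, a full invocation might look something like
+this (a sketch; `<toolchain>` is whatever name you gave your rustup toolchain,
+and `$C` is your crate's name):
+
+```bash
+> cargo +<toolchain> build          # build dependencies once
+> cargo clean -p $C                 # or just `touch src/lib.rs`
+> CARGO_INCREMENTAL=0 perf record -F99 --call-graph dwarf cargo +<toolchain> build
+```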
+ +### Gathering a perf profile from a `perf.rust-lang.org` test + +Often we want to analyze a specific test from `perf.rust-lang.org`. To +do that, the first step is to clone +[the rustc-perf repository][rustc-perf-gh]: + +```bash +> git clone https://github.com/rust-lang-nursery/rustc-perf +``` + +[rustc-perf-gh]: https://github.com/rust-lang-nursery/rustc-perf + +#### Doing it the easy way + +Once you've cloned the repo, you can use the `collector` executable to +do profiling for you! You can find +[instructions in the rustc-perf readme][rustc-perf-readme]. + +[rustc-perf-readme]: https://github.com/rust-lang-nursery/rustc-perf/blob/master/collector/README.md#profiling + +For example, to measure the clap-rs test, you might do: + +```bash +> ./target/release/collector + --output-repo /path/to/place/output + profile perf-record + --rustc /path/to/rustc/executable/from/your/build/directory + --cargo `which cargo` + --filter clap-rs + --builds Check +``` + +You can also use that same command to use cachegrind or other profiling tools. + +#### Doing it the hard way + +If you prefer to run things manually, that is also possible. You first +need to find the source for the test you want. Sources for the tests +are found in [the `collector/benchmarks` directory][dir]. So let's go +into the directory of a specific test; we'll use `clap-rs` as an +example: + +[dir]: https://github.com/rust-lang-nursery/rustc-perf/tree/master/collector/benchmarks + +```bash +> cd collector/benchmarks/clap-rs +``` + +In this case, let's say we want to profile the `cargo check` +performance. In that case, I would first run some basic commands to +build the dependencies: + +```bash +# Setup: first clean out any old results and build the dependencies: +> cargo + clean +> CARGO_INCREMENTAL=0 cargo + check +``` + +(Again, `` should be replaced with the name of the +toolchain we made in the first step.) + +Next: we want record the execution time for *just* the clap-rs crate, +running cargo check. I tend to use `cargo rustc` for this, since it +also allows me to add explicit flags, which we'll do later on. + +```bash +> touch src/lib.rs +> CARGO_INCREMENTAL=0 perf record -F99 --call-graph dwarf cargo rustc --profile check --lib +``` + +Note that final command: it's a doozy! It uses the `cargo rustc` +command, which executes rustc with (potentially) additional options; +the `--profile check` and `--lib` options specify that we are doing a +`cargo check` execution, and that this is a library (not a binary). + +At this point, we can use `perf` tooling to analyze the results. For example: + +```bash +> perf report +``` + +will open up an interactive TUI program. In simple cases, that can be +helpful. For more detailed examination, the [`perf-focus` tool][pf] +can be helpful; it is covered below. + +**A note of caution.** Each of the rustc-perf tests is its own special + snowflake. In particular, some of them are not libraries, in which + case you would want to do `touch src/main.rs` and avoid passing + `--lib`. I'm not sure how best to tell which test is which to be + honest. 
+ +### Gathering NLL data + +If you want to profile an NLL run, you can just pass extra options to +the `cargo rustc` command, like so: + +```bash +> touch src/lib.rs +> CARGO_INCREMENTAL=0 perf record -F99 --call-graph dwarf cargo rustc --profile check --lib -- -Zborrowck=mir +``` + +[pf]: https://github.com/nikomatsakis/perf-focus + +## Analyzing a perf profile with `perf focus` + +Once you've gathered a perf profile, we want to get some information +about it. For this, I personally use [perf focus][pf]. It's a kind of +simple but useful tool that lets you answer queries like: + +- "how much time was spent in function F" (no matter where it was called from) +- "how much time was spent in function F when it was called from G" +- "how much time was spent in function F *excluding* time spent in G" +- "what functions does F call and how much time does it spend in them" + +To understand how it works, you have to know just a bit about +perf. Basically, perf works by *sampling* your process on a regular +basis (or whenever some event occurs). For each sample, perf gathers a +backtrace. `perf focus` lets you write a regular expression that tests +which functions appear in that backtrace, and then tells you which +percentage of samples had a backtrace that met the regular +expression. It's probably easiest to explain by walking through how I +would analyze NLL performance. + +### Installing `perf-focus` + +You can install perf-focus using `cargo install`: + +```bash +> cargo install perf-focus +``` + +### Example: How much time is spent in MIR borrowck? + +Let's say we've gathered the NLL data for a test. We'd like to know +how much time it is spending in the MIR borrow-checker. The "main" +function of the MIR borrowck is called `do_mir_borrowck`, so we can do +this command: + +```bash +> perf focus '{do_mir_borrowck}' +Matcher : {do_mir_borrowck} +Matches : 228 +Not Matches: 542 +Percentage : 29% +``` + +The `'{do_mir_borrowck}'` argument is called the **matcher**. It +specifies the test to be applied on the backtrace. In this case, the +`{X}` indicates that there must be *some* function on the backtrace +that meets the regular expression `X`. In this case, that regex is +just the name of the function we want (in fact, it's a subset of the name; +the full name includes a bunch of other stuff, like the module +path). In this mode, perf-focus just prints out the percentage of +samples where `do_mir_borrowck` was on the stack: in this case, 29%. + +**A note about c++filt.** To get the data from `perf`, `perf focus` + currently executes `perf script` (perhaps there is a better + way...). I've sometimes found that `perf script` outputs C++ mangled + names. This is annoying. You can tell by running `perf script | + head` yourself — if you see names like `5rustc6middle` instead of + `rustc::middle`, then you have the same problem. You can solve this + by doing: + +```bash +> perf script | c++filt | perf focus --from-stdin ... +``` + +This will pipe the output from `perf script` through `c++filt` and +should mostly convert those names into a more friendly format. The +`--from-stdin` flag to `perf focus` tells it to get its data from +stdin, rather than executing `perf focus`. We should make this more +convenient (at worst, maybe add a `c++filt` option to `perf focus`, or +just always use it — it's pretty harmless). + +### Example: How much time does MIR borrowck spend solving traits? + +Perhaps we'd like to know how much time MIR borrowck spends in the +trait checker. 
We can ask this using a more complex regex:
+
+```bash
+> perf focus '{do_mir_borrowck}..{^rustc::traits}'
+Matcher    : {do_mir_borrowck},..{^rustc::traits}
+Matches    : 12
+Not Matches: 1311
+Percentage : 0%
+```
+
+Here we used the `..` operator to ask "how often do we have
+`do_mir_borrowck` on the stack and then, later, some function whose
+name begins with `rustc::traits`?" (basically, code in that module). It
+turns out the answer is "almost never" — only 12 samples fit that
+description (if you ever see *no* samples, that often indicates your
+query is messed up).
+
+If you're curious, you can find out exactly which samples by using the
+`--print-match` option. This will print out the full backtrace for
+each sample. The `|` at the front of the line indicates the part that
+the regular expression matched.
+
+### Example: Where does MIR borrowck spend its time?
+
+Often we want to do more "explorational" queries. Like, we know that
+MIR borrowck is 29% of the time, but where does that time get spent?
+For that, the `--tree-callees` option is often the best tool. You
+usually also want to give `--tree-min-percent` or
+`--tree-max-depth`. The result looks like this:
+
+```bash
+> perf focus '{do_mir_borrowck}' --tree-callees --tree-min-percent 3
+Matcher    : {do_mir_borrowck}
+Matches    : 577
+Not Matches: 746
+Percentage : 43%
+
+Tree
+| matched `{do_mir_borrowck}` (43% total, 0% self)
+: | rustc_mir::borrow_check::nll::compute_regions (20% total, 0% self)
+: : | rustc_mir::borrow_check::nll::type_check::type_check_internal (13% total, 0% self)
+: : : | core::ops::function::FnOnce::call_once (5% total, 0% self)
+: : : : | rustc_mir::borrow_check::nll::type_check::liveness::generate (5% total, 3% self)
+: : : | as rustc::mir::visit::Visitor<'tcx>>::visit_mir (3% total, 0% self)
+: | rustc::mir::visit::Visitor::visit_mir (8% total, 6% self)
+: | as rustc_mir::dataflow::DataflowResultsConsumer<'cx, 'tcx>>::visit_statement_entry (5% total, 0% self)
+: | rustc_mir::dataflow::do_dataflow (3% total, 0% self)
+```
+
+What happens with `--tree-callees` is that
+
+- we find each sample matching the regular expression
+- we look at the code that occurs *after* the regex match and try
+  to build up a call tree
+
+The `--tree-min-percent 3` option says "only show me things that take
+more than 3% of the time". Without this, the tree often gets really
+noisy and includes random stuff like the innards of
+malloc. `--tree-max-depth` can be useful too; it just limits how many
+levels we print.
+
+For each line, we display the percent of time in that function
+altogether ("total") and the percent of time spent in **just that
+function and not some callee of that function** (self). Usually
+"total" is the more interesting number, but not always.
+
+### Relative percentages
+
+By default, all percentages in perf-focus are relative to the **total
+program execution**. This is useful to help you keep perspective —
+often as we drill down to find hot spots, we can lose sight of the
+fact that, in terms of overall program execution, this "hot spot" is
+actually not important. It also ensures that percentages between
+different queries are easily compared against one another.
+
+That said, sometimes it's useful to get relative percentages, so `perf
+focus` offers a `--relative` option. In this case, the percentages are
+listed only for samples that match (vs all samples). 
So for example we +could get our percentages relative to the borrowck itself +like so: + +```bash +> perf focus '{do_mir_borrowck}' --tree-callees --relative --tree-max-depth 1 --tree-min-percent 5 +Matcher : {do_mir_borrowck} +Matches : 577 +Not Matches: 746 +Percentage : 100% + +Tree +| matched `{do_mir_borrowck}` (100% total, 0% self) +: | rustc_mir::borrow_check::nll::compute_regions (47% total, 0% self) [...] +: | rustc::mir::visit::Visitor::visit_mir (19% total, 15% self) [...] +: | as rustc_mir::dataflow::DataflowResultsConsumer<'cx, 'tcx>>::visit_statement_entry (13% total, 0% self) [...] +: | rustc_mir::dataflow::do_dataflow (8% total, 1% self) [...] +``` + +Here you see that `compute_regions` came up as "47% total" — that +means that 47% of `do_mir_borrowck` is spent in that function. Before, +we saw 20% — that's because `do_mir_borrowck` itself is only 43% of +the total time (and `.47 * .43 = .20`). diff --git a/src/doc/rustc-guide/src/query.md b/src/doc/rustc-guide/src/query.md new file mode 100644 index 0000000000..a0c0d62829 --- /dev/null +++ b/src/doc/rustc-guide/src/query.md @@ -0,0 +1,317 @@ +# Queries: demand-driven compilation + +As described in [the high-level overview of the compiler][hl], the +Rust compiler is current transitioning from a traditional "pass-based" +setup to a "demand-driven" system. **The Compiler Query System is the +key to our new demand-driven organization.** The idea is pretty +simple. You have various queries that compute things about the input +– for example, there is a query called `type_of(def_id)` that, given +the def-id of some item, will compute the type of that item and return +it to you. + +[hl]: high-level-overview.html + +Query execution is **memoized** – so the first time you invoke a +query, it will go do the computation, but the next time, the result is +returned from a hashtable. Moreover, query execution fits nicely into +**incremental computation**; the idea is roughly that, when you do a +query, the result **may** be returned to you by loading stored data +from disk (but that's a separate topic we won't discuss further here). + +The overall vision is that, eventually, the entire compiler +control-flow will be query driven. There will effectively be one +top-level query ("compile") that will run compilation on a crate; this +will in turn demand information about that crate, starting from the +*end*. For example: + +- This "compile" query might demand to get a list of codegen-units + (i.e. modules that need to be compiled by LLVM). +- But computing the list of codegen-units would invoke some subquery + that returns the list of all modules defined in the Rust source. +- That query in turn would invoke something asking for the HIR. +- This keeps going further and further back until we wind up doing the + actual parsing. + +However, that vision is not fully realized. Still, big chunks of the +compiler (for example, generating MIR) work exactly like this. + +### Invoking queries + +To invoke a query is simple. The tcx ("type context") offers a method +for each defined query. So, for example, to invoke the `type_of` +query, you would just do this: + +```rust,ignore +let ty = tcx.type_of(some_def_id); +``` + +### Cycles between queries + +A cycle is when a query becomes stuck in a loop e.g. query A generates query B +which generates query A again. + +Currently, cycles during query execution should always result in a +compilation error. 
Typically, they arise because of illegal programs +that contain cyclic references they shouldn't (though sometimes they +arise because of compiler bugs, in which case we need to factor our +queries in a more fine-grained fashion to avoid them). + +However, it is nonetheless often useful to *recover* from a cycle +(after reporting an error, say) and try to soldier on, so as to give a +better user experience. In order to recover from a cycle, you don't +get to use the nice method-call-style syntax. Instead, you invoke +using the `try_get` method, which looks roughly like this: + +```rust,ignore +use ty::queries; +... +match queries::type_of::try_get(tcx, DUMMY_SP, self.did) { + Ok(result) => { + // no cycle occurred! You can use `result` + } + Err(err) => { + // A cycle occurred! The error value `err` is a `DiagnosticBuilder`, + // meaning essentially an "in-progress", not-yet-reported error message. + // See below for more details on what to do here. + } +} +``` + +So, if you get back an `Err` from `try_get`, then a cycle *did* occur. This +means that you must ensure that a compiler error message is reported. You can +do that in two ways: + +The simplest is to invoke `err.emit()`. This will emit the cycle error to the +user. + +However, often cycles happen because of an illegal program, and you +know at that point that an error either already has been reported or +will be reported due to this cycle by some other bit of code. In that +case, you can invoke `err.cancel()` to not emit any error. It is +traditional to then invoke: + +```rust,ignore +tcx.sess.delay_span_bug(some_span, "some message") +``` + +`delay_span_bug()` is a helper that says: we expect a compilation +error to have happened or to happen in the future; so, if compilation +ultimately succeeds, make an ICE with the message `"some +message"`. This is basically just a precaution in case you are wrong. + +### How the compiler executes a query + +So you may be wondering what happens when you invoke a query +method. The answer is that, for each query, the compiler maintains a +cache – if your query has already been executed, then, the answer is +simple: we clone the return value out of the cache and return it +(therefore, you should try to ensure that the return types of queries +are cheaply cloneable; insert a `Rc` if necessary). + +#### Providers + +If, however, the query is *not* in the cache, then the compiler will +try to find a suitable **provider**. A provider is a function that has +been defined and linked into the compiler somewhere that contains the +code to compute the result of the query. + +**Providers are defined per-crate.** The compiler maintains, +internally, a table of providers for every crate, at least +conceptually. Right now, there are really two sets: the providers for +queries about the **local crate** (that is, the one being compiled) +and providers for queries about **external crates** (that is, +dependencies of the local crate). Note that what determines the crate +that a query is targeting is not the *kind* of query, but the *key*. +For example, when you invoke `tcx.type_of(def_id)`, that could be a +local query or an external query, depending on what crate the `def_id` +is referring to (see the `self::keys::Key` trait for more information +on how that works). + +Providers always have the same signature: + +```rust,ignore +fn provider<'cx, 'tcx>(tcx: TyCtxt<'cx, 'tcx, 'tcx>, + key: QUERY_KEY) + -> QUERY_RESULT +{ + ... +} +``` + +Providers take two arguments: the `tcx` and the query key. 
Note also +that they take the *global* tcx (i.e. they use the `'tcx` lifetime +twice), rather than taking a tcx with some active inference context. +They return the result of the query. + +#### How providers are setup + +When the tcx is created, it is given the providers by its creator using +the `Providers` struct. This struct is generated by the macros here, but it +is basically a big list of function pointers: + +```rust,ignore +struct Providers { + type_of: for<'cx, 'tcx> fn(TyCtxt<'cx, 'tcx, 'tcx>, DefId) -> Ty<'tcx>, + ... +} +``` + +At present, we have one copy of the struct for local crates, and one +for external crates, though the plan is that we may eventually have +one per crate. + +These `Provider` structs are ultimately created and populated by +`librustc_driver`, but it does this by distributing the work +throughout the other `rustc_*` crates. This is done by invoking +various `provide` functions. These functions tend to look something +like this: + +```rust,ignore +pub fn provide(providers: &mut Providers) { + *providers = Providers { + type_of, + ..*providers + }; +} +``` + +That is, they take an `&mut Providers` and mutate it in place. Usually +we use the formulation above just because it looks nice, but you could +as well do `providers.type_of = type_of`, which would be equivalent. +(Here, `type_of` would be a top-level function, defined as we saw +before.) So, if we want to add a provider for some other query, +let's call it `fubar`, into the crate above, we might modify the `provide()` +function like so: + +```rust,ignore +pub fn provide(providers: &mut Providers) { + *providers = Providers { + type_of, + fubar, + ..*providers + }; +} + +fn fubar<'cx, 'tcx>(tcx: TyCtxt<'cx, 'tcx>, key: DefId) -> Fubar<'tcx> { ... } +``` + +N.B. Most of the `rustc_*` crates only provide **local +providers**. Almost all **extern providers** wind up going through the +[`rustc_metadata` crate][rustc_metadata], which loads the information from the +crate metadata. But in some cases there are crates that provide queries for +*both* local and external crates, in which case they define both a +`provide` and a `provide_extern` function that `rustc_driver` can +invoke. + +[rustc_metadata]: https://github.com/rust-lang/rust/tree/master/src/librustc_metadata + +### Adding a new kind of query + +So suppose you want to add a new kind of query, how do you do so? +Well, defining a query takes place in two steps: + +1. first, you have to specify the query name and arguments; and then, +2. you have to supply query providers where needed. + +To specify the query name and arguments, you simply add an entry to +the big macro invocation in +[`src/librustc/ty/query/mod.rs`][query-mod], which looks something like: + +[query-mod]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc/ty/query/index.html + +```rust,ignore +define_queries! { <'tcx> + /// Records the type of every item. + [] fn type_of: TypeOfItem(DefId) -> Ty<'tcx>, + + ... +} +``` + +Each line of the macro defines one query. The name is broken up like this: + +```rust,ignore +[] fn type_of: TypeOfItem(DefId) -> Ty<'tcx>, +^^ ^^^^^^^ ^^^^^^^^^^ ^^^^^ ^^^^^^^^ +| | | | | +| | | | result type of query +| | | query key type +| | dep-node constructor +| name of query +query flags +``` + +Let's go over them one by one: + +- **Query flags:** these are largely unused right now, but the intention + is that we'll be able to customize various aspects of how the query is + processed. +- **Name of query:** the name of the query method + (`tcx.type_of(..)`). 
Also used as the name of a struct + (`ty::queries::type_of`) that will be generated to represent + this query. +- **Dep-node constructor:** indicates the constructor function that + connects this query to incremental compilation. Typically, this is a + `DepNode` variant, which can be added by modifying the + `define_dep_nodes!` macro invocation in + [`librustc/dep_graph/dep_node.rs`][dep-node]. + - However, sometimes we use a custom function, in which case the + name will be in snake case and the function will be defined at the + bottom of the file. This is typically used when the query key is + not a def-id, or just not the type that the dep-node expects. +- **Query key type:** the type of the argument to this query. + This type must implement the `ty::query::keys::Key` trait, which + defines (for example) how to map it to a crate, and so forth. +- **Result type of query:** the type produced by this query. This type + should (a) not use `RefCell` or other interior mutability and (b) be + cheaply cloneable. Interning or using `Rc` or `Arc` is recommended for + non-trivial data types. + - The one exception to those rules is the `ty::steal::Steal` type, + which is used to cheaply modify MIR in place. See the definition + of `Steal` for more details. New uses of `Steal` should **not** be + added without alerting `@rust-lang/compiler`. + +[dep-node]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc/dep_graph/struct.DepNode.html + +So, to add a query: + +- Add an entry to `define_queries!` using the format above. +- Possibly add a corresponding entry to the dep-node macro. +- Link the provider by modifying the appropriate `provide` method; + or add a new one if needed and ensure that `rustc_driver` is invoking it. + +#### Query structs and descriptions + +For each kind, the `define_queries` macro will generate a "query struct" +named after the query. This struct is a kind of a place-holder +describing the query. Each such struct implements the +`self::config::QueryConfig` trait, which has associated types for the +key/value of that particular query. Basically the code generated looks something +like this: + +```rust,ignore +// Dummy struct representing a particular kind of query: +pub struct type_of<'tcx> { phantom: PhantomData<&'tcx ()> } + +impl<'tcx> QueryConfig for type_of<'tcx> { + type Key = DefId; + type Value = Ty<'tcx>; +} +``` + +There is an additional trait that you may wish to implement called +`self::config::QueryDescription`. This trait is used during cycle +errors to give a "human readable" name for the query, so that we can +summarize what was happening when the cycle occurred. Implementing +this trait is optional if the query key is `DefId`, but if you *don't* +implement it, you get a pretty generic error ("processing `foo`..."). +You can put new impls into the `config` module. They look something like this: + +```rust,ignore +impl<'tcx> QueryDescription for queries::type_of<'tcx> { + fn describe(tcx: TyCtxt, key: DefId) -> String { + format!("computing the type of `{}`", tcx.item_path_str(key)) + } +} +``` + diff --git a/src/doc/rustc-guide/src/rustc-driver.md b/src/doc/rustc-guide/src/rustc-driver.md new file mode 100644 index 0000000000..3979498367 --- /dev/null +++ b/src/doc/rustc-guide/src/rustc-driver.md @@ -0,0 +1,76 @@ +# The Rustc Driver + +The [`rustc_driver`] is essentially `rustc`'s `main()` function. 
It acts as +the glue for running the various phases of the compiler in the correct order, +managing state such as the [`SourceMap`] \(maps AST nodes to source code), +[`Session`] \(general build context and error messaging) and the [`TyCtxt`] +\(the "typing context", allowing you to query the type system and other cool +stuff). The `rustc_driver` crate also provides external users with a method +for running code at particular times during the compilation process, allowing +third parties to effectively use `rustc`'s internals as a library for +analysing a crate or emulating the compiler in-process (e.g. the RLS). + +For those using `rustc` as a library, the `run_compiler()` function is the main +entrypoint to the compiler. Its main parameters are a list of command-line +arguments and a reference to something which implements the `CompilerCalls` +trait. A `CompilerCalls` creates the overall `CompileController`, letting it +govern which compiler passes are run and attach callbacks to be fired at the end +of each phase. + +From `rustc_driver`'s perspective, the main phases of the compiler are: + +1. *Parse Input:* Initial crate parsing +2. *Configure and Expand:* Resolve `#[cfg]` attributes, name resolution, and + expand macros +3. *Run Analysis Passes:* Run trait resolution, typechecking, region checking + and other miscellaneous analysis passes on the crate +4. *Translate to LLVM:* Translate to the in-memory form of LLVM IR and turn it + into an executable/object files + +The `CompileController` then gives users the ability to inspect the ongoing +compilation process + +- after parsing +- after AST expansion +- after HIR lowering +- after analysis, and +- when compilation is done + +The `CompileState`'s various `state_after_*()` constructors can be inspected to +determine what bits of information are available to which callback. + +For a more detailed explanation on using `rustc_driver`, check out the +[stupid-stats] guide by `@nrc` (attached as [Appendix A]). + +> **Warning:** By its very nature, the internal compiler APIs are always going +> to be unstable. That said, we do try not to break things unnecessarily. + +## A Note On Lifetimes + +The Rust compiler is a fairly large program containing lots of big data +structures (e.g. the AST, HIR, and the type system) and as such, arenas and +references are heavily relied upon to minimize unnecessary memory use. This +manifests itself in the way people can plug into the compiler, preferring a +"push"-style API (callbacks) instead of the more Rust-ic "pull" style (think +the `Iterator` trait). + +For example the [`CompileState`], the state passed to callbacks after each +phase, is essentially just a box of optional references to pieces inside the +compiler. The lifetime bound on the `CompilerCalls` trait then helps to ensure +compiler internals don't "escape" the compiler (e.g. if you tried to keep a +reference to the AST after the compiler is finished), while still letting users +record *some* state for use after the `run_compiler()` function finishes. + +Thread-local storage and interning are used a lot through the compiler to reduce +duplication while also preventing a lot of the ergonomic issues due to many +pervasive lifetimes. The `rustc::ty::tls` module is used to access these +thread-locals, although you should rarely need to touch it. 
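+
+To make the lifetime point above concrete, here is a small standalone
+toy example (not actual `rustc_driver` code; the `Driver` and
+`Analysis` types are invented for illustration). A push-style,
+lifetime-bounded callback lets the caller copy *owned* results out
+while the borrowed compiler state cannot escape the call:
+
+```rust
+// Toy stand-ins for the compiler's state and driver; not real rustc types.
+struct Analysis {
+    data: String, // imagine the AST, HIR, type tables, ...
+}
+
+struct Driver {
+    analysis: Analysis,
+}
+
+impl Driver {
+    // The callback only borrows `Analysis` for the duration of the call,
+    // so the reference cannot be stashed somewhere that outlives `run`.
+    fn run<F: FnMut(&Analysis)>(&mut self, mut after_analysis: F) {
+        // ... imagine the expensive compilation happening here ...
+        after_analysis(&self.analysis);
+    }
+}
+
+fn main() {
+    let mut driver = Driver {
+        analysis: Analysis { data: String::from("crate info") },
+    };
+
+    // Owned data can be copied out of the callback, but the borrow of
+    // `driver.analysis` itself cannot be kept past the call.
+    let mut summary = String::new();
+    driver.run(|state| summary = state.data.len().to_string());
+    println!("recorded {} bytes of analysis data", summary);
+}
+```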
+ + +[`rustc_driver`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_driver/ +[`CompileState`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_driver/driver/struct.CompileState.html +[`Session`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc/session/struct.Session.html +[`TyCtxt`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc/ty/struct.TyCtxt.html +[`SourceMap`]: https://doc.rust-lang.org/nightly/nightly-rustc/syntax/source_map/struct.SourceMap.html +[stupid-stats]: https://github.com/nrc/stupid-stats +[Appendix A]: appendix/stupid-stats.html diff --git a/src/doc/rustc-guide/src/rustdoc.md b/src/doc/rustc-guide/src/rustdoc.md new file mode 100644 index 0000000000..318a8e2e12 --- /dev/null +++ b/src/doc/rustc-guide/src/rustdoc.md @@ -0,0 +1,242 @@ +# The walking tour of rustdoc + +Rustdoc actually uses the rustc internals directly. It lives in-tree with the +compiler and standard library. This chapter is about how it works. + +Rustdoc is implemented entirely within the crate [`librustdoc`][rd]. It runs +the compiler up to the point where we have an internal representation of a +crate (HIR) and the ability to run some queries about the types of items. [HIR] +and [queries] are discussed in the linked chapters. + +[HIR]: ./hir.html +[queries]: ./query.html +[rd]: https://github.com/rust-lang/rust/tree/master/src/librustdoc + +`librustdoc` performs two major steps after that to render a set of +documentation: + +* "Clean" the AST into a form that's more suited to creating documentation (and + slightly more resistant to churn in the compiler). +* Use this cleaned AST to render a crate's documentation, one page at a time. + +Naturally, there's more than just this, and those descriptions simplify out +lots of details, but that's the high-level overview. + +(Side note: `librustdoc` is a library crate! The `rustdoc` binary is created +using the project in [`src/tools/rustdoc`][bin]. Note that literally all that +does is call the `main()` that's in this crate's `lib.rs`, though.) + +[bin]: https://github.com/rust-lang/rust/tree/master/src/tools/rustdoc + +## Cheat sheet + +* Use `./x.py build --stage 1 src/libstd src/tools/rustdoc` to make a usable + rustdoc you can run on other projects. + * Add `src/libtest` to be able to use `rustdoc --test`. + * If you've used `rustup toolchain link local /path/to/build/$TARGET/stage1` + previously, then after the previous build command, `cargo +local doc` will + Just Work. +* Use `./x.py doc --stage 1 src/libstd` to use this rustdoc to generate the + standard library docs. + * The completed docs will be available in `build/$TARGET/doc/std`, though the + bundle is meant to be used as though you would copy out the `doc` folder to + a web server, since that's where the CSS/JS and landing page are. +* Most of the HTML printing code is in `html/format.rs` and `html/render.rs`. + It's in a bunch of `fmt::Display` implementations and supplementary + functions. +* The types that got `Display` impls above are defined in `clean/mod.rs`, right + next to the custom `Clean` trait used to process them out of the rustc HIR. +* The bits specific to using rustdoc as a test harness are in `test.rs`. +* The Markdown renderer is loaded up in `html/markdown.rs`, including functions + for extracting doctests from a given block of Markdown. +* The tests on rustdoc *output* are located in `src/test/rustdoc`, where + they're handled by the test runner of rustbuild and the supplementary script + `src/etc/htmldocck.py`. 
+* Tests on search index generation are located in `src/test/rustdoc-js`, as a + series of JavaScript files that encode queries on the standard library search + index and expected results. + +## From crate to clean + +In `core.rs` are two central items: the `DocContext` struct, and the `run_core` +function. The latter is where rustdoc calls out to rustc to compile a crate to +the point where rustdoc can take over. The former is a state container used +when crawling through a crate to gather its documentation. + +The main process of crate crawling is done in `clean/mod.rs` through several +implementations of the `Clean` trait defined within. This is a conversion +trait, which defines one method: + +```rust,ignore +pub trait Clean { + fn clean(&self, cx: &DocContext) -> T; +} +``` + +`clean/mod.rs` also defines the types for the "cleaned" AST used later on to +render documentation pages. Each usually accompanies an implementation of +`Clean` that takes some AST or HIR type from rustc and converts it into the +appropriate "cleaned" type. "Big" items like modules or associated items may +have some extra processing in its `Clean` implementation, but for the most part +these impls are straightforward conversions. The "entry point" to this module +is the `impl Clean for visit_ast::RustdocVisitor`, which is called by +`run_core` above. + +You see, I actually lied a little earlier: There's another AST transformation +that happens before the events in `clean/mod.rs`. In `visit_ast.rs` is the +type `RustdocVisitor`, which *actually* crawls a `hir::Crate` to get the first +intermediate representation, defined in `doctree.rs`. This pass is mainly to +get a few intermediate wrappers around the HIR types and to process visibility +and inlining. This is where `#[doc(inline)]`, `#[doc(no_inline)]`, and +`#[doc(hidden)]` are processed, as well as the logic for whether a `pub use` +should get the full page or a "Reexport" line in the module page. + +The other major thing that happens in `clean/mod.rs` is the collection of doc +comments and `#[doc=""]` attributes into a separate field of the Attributes +struct, present on anything that gets hand-written documentation. This makes it +easier to collect this documentation later in the process. + +The primary output of this process is a `clean::Crate` with a tree of Items +which describe the publicly-documentable items in the target crate. + +### Hot potato + +Before moving on to the next major step, a few important "passes" occur over +the documentation. These do things like combine the separate "attributes" into +a single string and strip leading whitespace to make the document easier on the +markdown parser, or drop items that are not public or deliberately hidden with +`#[doc(hidden)]`. These are all implemented in the `passes/` directory, one +file per pass. By default, all of these passes are run on a crate, but the ones +regarding dropping private/hidden items can be bypassed by passing +`--document-private-items` to rustdoc. Note that unlike the previous set of AST +transformations, the passes happen on the _cleaned_ crate. + +(Strictly speaking, you can fine-tune the passes run and even add your own, but +[we're trying to deprecate that][44136]. If you need finer-grain control over +these passes, please let us know!) + +[44136]: https://github.com/rust-lang/rust/issues/44136 + +Here is current (as of this writing) list of passes: + +- `propagate-doc-cfg` - propagates `#[doc(cfg(...))]` to child items. 
+- `collapse-docs` concatenates all document attributes into one document + attribute. This is necessary because each line of a doc comment is given as a + separate doc attribute, and this will combine them into a single string with + line breaks between each attribute. +- `unindent-comments` removes excess indentation on comments in order for + markdown to like it. This is necessary because the convention for writing + documentation is to provide a space between the `///` or `//!` marker and the + text, and stripping that leading space will make the text easier to parse by + the Markdown parser. (In the past, the markdown parser used was not + Commonmark- compliant, which caused annoyances with extra whitespace but this + seems to be less of an issue today.) +- `strip-priv-imports` strips all private import statements (`use`, `extern + crate`) from a crate. This is necessary because rustdoc will handle *public* + imports by either inlining the item's documentation to the module or creating + a "Reexports" section with the import in it. The pass ensures that all of + these imports are actually relevant to documentation. +- `strip-hidden` and `strip-private` strip all `doc(hidden)` and private items + from the output. `strip-private` implies `strip-priv-imports`. Basically, the + goal is to remove items that are not relevant for public documentation. + +## From clean to crate + +This is where the "second phase" in rustdoc begins. This phase primarily lives +in the `html/` folder, and it all starts with `run()` in `html/render.rs`. This +code is responsible for setting up the `Context`, `SharedContext`, and `Cache` +which are used during rendering, copying out the static files which live in +every rendered set of documentation (things like the fonts, CSS, and JavaScript +that live in `html/static/`), creating the search index, and printing out the +source code rendering, before beginning the process of rendering all the +documentation for the crate. + +Several functions implemented directly on `Context` take the `clean::Crate` and +set up some state between rendering items or recursing on a module's child +items. From here the "page rendering" begins, via an enormous `write!()` call +in `html/layout.rs`. The parts that actually generate HTML from the items and +documentation occurs within a series of `std::fmt::Display` implementations and +functions that pass around a `&mut std::fmt::Formatter`. The top-level +implementation that writes out the page body is the `impl<'a> fmt::Display for +Item<'a>` in `html/render.rs`, which switches out to one of several `item_*` +functions based on the kind of `Item` being rendered. + +Depending on what kind of rendering code you're looking for, you'll probably +find it either in `html/render.rs` for major items like "what sections should I +print for a struct page" or `html/format.rs` for smaller component pieces like +"how should I print a where clause as part of some other item". + +Whenever rustdoc comes across an item that should print hand-written +documentation alongside, it calls out to `html/markdown.rs` which interfaces +with the Markdown parser. This is exposed as a series of types that wrap a +string of Markdown, and implement `fmt::Display` to emit HTML text. It takes +special care to enable certain features like footnotes and tables and add +syntax highlighting to Rust code blocks (via `html/highlight.rs`) before +running the Markdown parser. 
There's also a function in here +(`find_testable_code`) that specifically scans for Rust code blocks so the +test-runner code can find all the doctests in the crate. + +### From soup to nuts + +(alternate title: ["An unbroken thread that stretches from those first `Cell`s +to us"][video]) + +[video]: https://www.youtube.com/watch?v=hOLAGYmUQV0 + +It's important to note that the AST cleaning can ask the compiler for +information (crucially, `DocContext` contains a `TyCtxt`), but page rendering +cannot. The `clean::Crate` created within `run_core` is passed outside the +compiler context before being handed to `html::render::run`. This means that a +lot of the "supplementary data" that isn't immediately available inside an +item's definition, like which trait is the `Deref` trait used by the language, +needs to be collected during cleaning, stored in the `DocContext`, and passed +along to the `SharedContext` during HTML rendering. This manifests as a bunch +of shared state, context variables, and `RefCell`s. + +Also of note is that some items that come from "asking the compiler" don't go +directly into the `DocContext` - for example, when loading items from a foreign +crate, rustdoc will ask about trait implementations and generate new `Item`s +for the impls based on that information. This goes directly into the returned +`Crate` rather than roundabout through the `DocContext`. This way, these +implementations can be collected alongside the others, right before rendering +the HTML. + +## Other tricks up its sleeve + +All this describes the process for generating HTML documentation from a Rust +crate, but there are couple other major modes that rustdoc runs in. It can also +be run on a standalone Markdown file, or it can run doctests on Rust code or +standalone Markdown files. For the former, it shortcuts straight to +`html/markdown.rs`, optionally including a mode which inserts a Table of +Contents to the output HTML. + +For the latter, rustdoc runs a similar partial-compilation to get relevant +documentation in `test.rs`, but instead of going through the full clean and +render process, it runs a much simpler crate walk to grab *just* the +hand-written documentation. Combined with the aforementioned +"`find_testable_code`" in `html/markdown.rs`, it builds up a collection of +tests to run before handing them off to the libtest test runner. One notable +location in `test.rs` is the function `make_test`, which is where hand-written +doctests get transformed into something that can be executed. + +Some extra reading about `make_test` can be found +[here](https://quietmisdreavus.net/code/2018/02/23/how-the-doctests-get-made/). + +## Dotting i's and crossing t's + +So that's rustdoc's code in a nutshell, but there's more things in the repo +that deal with it. Since we have the full `compiletest` suite at hand, there's +a set of tests in `src/test/rustdoc` that make sure the final HTML is what we +expect in various situations. These tests also use a supplementary script, +`src/etc/htmldocck.py`, that allows it to look through the final HTML using +XPath notation to get a precise look at the output. The full description of all +the commands available to rustdoc tests is in `htmldocck.py`. + +In addition, there are separate tests for the search index and rustdoc's +ability to query it. The files in `src/test/rustdoc-js` each contain a +different search query and the expected results, broken out by search tab. +These files are processed by a script in `src/tools/rustdoc-js` and the Node.js +runtime. 
These tests don't have as thorough of a writeup, but a broad example +that features results in all tabs can be found in `basic.js`. The basic idea is +that you match a given `QUERY` with a set of `EXPECTED` results, complete with +the full item path of each item. diff --git a/src/doc/rustc-guide/src/test-implementation.md b/src/doc/rustc-guide/src/test-implementation.md new file mode 100644 index 0000000000..3c93ad619e --- /dev/null +++ b/src/doc/rustc-guide/src/test-implementation.md @@ -0,0 +1,154 @@ +### The `#[test]` attribute +Today, rust programmers rely on a built in attribute called `#[test]`. All +you have to do is mark a function as a test and include some asserts like so: + +```rust,ignore +#[test] +fn my_test() { + assert!(2+2 == 4); +} +``` + +When this program is compiled using `rustc --test` or `cargo test`, it will +produce an executable that can run this, and any other test function. This +method of testing allows tests to live alongside code in an organic way. You +can even put tests inside private modules: + +```rust,ignore +mod my_priv_mod { + fn my_priv_func() -> bool {} + + #[test] + fn test_priv_func() { + assert!(my_priv_func()); + } +} +``` +Private items can thus be easily tested without worrying about how to expose +the them to any sort of external testing apparatus. This is key to the +ergonomics of testing in Rust. Semantically, however, it's rather odd. +How does any sort of `main` function invoke these tests if they're not visible? +What exactly is `rustc --test` doing? + +`#[test]` is implemented as a syntactic transformation inside the compiler's +[`libsyntax` crate][libsyntax]. Essentially, it's a fancy macro, that +rewrites the crate in 3 steps: + +#### Step 1: Re-Exporting + +As mentioned earlier, tests can exist inside private modules, so we need a +way of exposing them to the main function, without breaking any existing +code. To that end, `libsyntax` will create local modules called +`__test_reexports` that recursively reexport tests. This expansion translates +the above example into: + +```rust,ignore +mod my_priv_mod { + fn my_priv_func() -> bool {} + + pub fn test_priv_func() { + assert!(my_priv_func()); + } + + pub mod __test_reexports { + pub use super::test_priv_func; + } +} +``` + +Now, our test can be accessed as +`my_priv_mod::__test_reexports::test_priv_func`. For deeper module +structures, `__test_reexports` will reexport modules that contain tests, so a +test at `a::b::my_test` becomes +`a::__test_reexports::b::__test_reexports::my_test`. While this process seems +pretty safe, what happens if there is an existing `__test_reexports` module? +The answer: nothing. + +To explain, we need to understand [how the AST represents +identifiers][Ident]. The name of every function, variable, module, etc. is +not stored as a string, but rather as an opaque [Symbol][Symbol] which is +essentially an ID number for each identifier. The compiler keeps a separate +hashtable that allows us to recover the human-readable name of a Symbol when +necessary (such as when printing a syntax error). When the compiler generates +the `__test_reexports` module, it generates a new Symbol for the identifier, +so while the compiler-generated `__test_reexports` may share a name with your +hand-written one, it will not share a Symbol. This technique prevents name +collision during code generation and is the foundation of Rust's macro +hygiene. + +#### Step 2: Harness Generation +Now that our tests are accessible from the root of our crate, we need to do +something with them. 
`libsyntax` generates a module like so: + +```rust,ignore +pub mod __test { + extern crate test; + const TESTS: &'static [self::test::TestDescAndFn] = &[/*...*/]; + + #[main] + pub fn main() { + self::test::test_static_main(TESTS); + } +} +``` + +While this transformation is simple, it gives us a lot of insight into how +tests are actually run. The tests are aggregated into an array and passed to +a test runner called `test_static_main`. We'll come back to exactly what +`TestDescAndFn` is, but for now, the key takeaway is that there is a crate +called [`test`][test] that is part of Rust core, that implements all of the +runtime for testing. `test`'s interface is unstable, so the only stable way +to interact with it is through the `#[test]` macro. + +#### Step 3: Test Object Generation +If you've written tests in Rust before, you may be familiar with some of the +optional attributes available on test functions. For example, a test can be +annotated with `#[should_panic]` if we expect the test to cause a panic. It +looks something like this: + +```rust,ignore +#[test] +#[should_panic] +fn foo() { + panic!("intentional"); +} +``` + +This means our tests are more than just simple functions, they have +configuration information as well. `test` encodes this configuration data +into a struct called [`TestDesc`][TestDesc]. For each test function in a +crate, `libsyntax` will parse its attributes and generate a `TestDesc` +instance. It then combines the `TestDesc` and test function into the +predictably named `TestDescAndFn` struct, that `test_static_main` operates +on. For a given test, the generated `TestDescAndFn` instance looks like so: + +```rust,ignore +self::test::TestDescAndFn{ + desc: self::test::TestDesc{ + name: self::test::StaticTestName("foo"), + ignore: false, + should_panic: self::test::ShouldPanic::Yes, + allow_fail: false, + }, + testfn: self::test::StaticTestFn(|| + self::test::assert_test_result(::crate::__test_reexports::foo())), +} +``` + +Once we've constructed an array of these test objects, they're passed to the +test runner via the harness generated in step 2. + +### Inspecting the generated code +On nightly rust, there's an unstable flag called `unpretty` that you can use +to print out the module source after macro expansion: + +```bash +$ rustc my_mod.rs -Z unpretty=hir +``` + +[test]: https://doc.rust-lang.org/test/index.html +[TestDesc]: https://doc.rust-lang.org/test/struct.TestDesc.html +[Symbol]: https://doc.rust-lang.org/nightly/nightly-rustc/syntax/ast/struct.Ident.html +[Ident]: https://doc.rust-lang.org/nightly/nightly-rustc/syntax/ast/struct.Ident.html +[eRFC]: https://github.com/rust-lang/rfcs/blob/master/text/2318-custom-test-frameworks.md +[libsyntax]: https://github.com/rust-lang/rust/tree/master/src/libsyntax \ No newline at end of file diff --git a/src/doc/rustc-guide/src/tests/adding.md b/src/doc/rustc-guide/src/tests/adding.md new file mode 100644 index 0000000000..5b86748142 --- /dev/null +++ b/src/doc/rustc-guide/src/tests/adding.md @@ -0,0 +1,346 @@ +# Adding new tests + +**In general, we expect every PR that fixes a bug in rustc to come +accompanied by a regression test of some kind.** This test should fail +in master but pass after the PR. These tests are really useful for +preventing us from repeating the mistakes of the past. + +To add a new test, the first thing you generally do is to create a +file, typically a Rust source file. 
Test files have a particular +structure: + +- They should have some kind of + [comment explaining what the test is about](#explanatory_comment); +- next, they can have one or more [header commands](#header_commands), which + are special comments that the test interpreter knows how to interpret. +- finally, they have the Rust source. This may have various [error + annotations](#error_annotations) which indicate expected compilation errors or + warnings. + +Depending on the test suite, there may be some other details to be aware of: + - For [the `ui` test suite](#ui), you need to generate reference output files. + +## What kind of test should I add? + +It can be difficult to know what kind of test to use. Here are some +rough heuristics: + +- Some tests have specialized needs: + - need to run gdb or lldb? use the `debuginfo` test suite + - need to inspect LLVM IR or MIR IR? use the `codegen` or `mir-opt` test + suites + - need to run rustdoc? Prefer a `rustdoc` test + - need to inspect the resulting binary in some way? Then use `run-make` +- For most other things, [a `ui` (or `ui-fulldeps`) test](#ui) is to be + preferred: + - `ui` tests subsume both run-pass, compile-fail, and parse-fail tests + - in the case of warnings or errors, `ui` tests capture the full output, + which makes it easier to review but also helps prevent "hidden" regressions + in the output + +## Naming your test + +We have not traditionally had a lot of structure in the names of +tests. Moreover, for a long time, the rustc test runner did not +support subdirectories (it now does), so test suites like +[`src/test/run-pass`] have a huge mess of files in them. This is not +considered an ideal setup. + +[`src/test/run-pass`]: https://github.com/rust-lang/rust/tree/master/src/test/run-pass/ + +For regression tests – basically, some random snippet of code that +came in from the internet – we often just name the test after the +issue. For example, `src/test/run-pass/issue-12345.rs`. If possible, +though, it is better if you can put the test into a directory that +helps identify what piece of code is being tested here (e.g., +`borrowck/issue-12345.rs` is much better), or perhaps give it a more +meaningful name. Still, **do include the issue number somewhere**. + +When writing a new feature, **create a subdirectory to store your +tests**. For example, if you are implementing RFC 1234 ("Widgets"), +then it might make sense to put the tests in directories like: + +- `src/test/ui/rfc1234-widgets/` +- `src/test/run-pass/rfc1234-widgets/` +- etc + +In other cases, there may already be a suitable directory. (The proper +directory structure to use is actually an area of active debate.) + + + +## Comment explaining what the test is about + +When you create a test file, **include a comment summarizing the point +of the test at the start of the file**. This should highlight which +parts of the test are more important, and what the bug was that the +test is fixing. Citing an issue number is often very helpful. + +This comment doesn't have to be super extensive. Just something like +"Regression test for #18060: match arms were matching in the wrong +order." might already be enough. + +These comments are very useful to others later on when your test +breaks, since they often can highlight what the problem is. 
They are +also useful if for some reason the tests need to be refactored, since +they let others know which parts of the test were important (often a +test must be rewritten because it no longer tests what is was meant to +test, and then it's useful to know what it *was* meant to test +exactly). + + + +## Header commands: configuring rustc + +Header commands are special comments that the test runner knows how to +interpret. They must appear before the Rust source in the test. They +are normally put after the short comment that explains the point of +this test. For example, this test uses the `// compile-flags` command +to specify a custom flag to give to rustc when the test is compiled: + +```rust,ignore +// Test the behavior of `0 - 1` when overflow checks are disabled. + +// compile-flags: -Coverflow-checks=off + +fn main() { + let x = 0 - 1; + ... +} +``` + +### Ignoring tests + +These are used to ignore the test in some situations, which means the test won't +be compiled or run. + +* `ignore-X` where `X` is a target detail or stage will ignore the + test accordingly (see below) +* `only-X` is like `ignore-X`, but will *only* run the test on that + target or stage +* `ignore-pretty` will not compile the pretty-printed test (this is + done to test the pretty-printer, but might not always work) +* `ignore-test` always ignores the test +* `ignore-lldb` and `ignore-gdb` will skip a debuginfo test on that + debugger. +* `ignore-gdb-version` can be used to ignore the test when certain gdb + versions are used + +Some examples of `X` in `ignore-X`: + +* Architecture: `aarch64`, `arm`, `asmjs`, `mips`, `wasm32`, `x86_64`, + `x86`, ... +* OS: `android`, `emscripten`, `freebsd`, `ios`, `linux`, `macos`, + `windows`, ... +* Environment (fourth word of the target triple): `gnu`, `msvc`, + `musl`. +* Pointer width: `32bit`, `64bit`. +* Stage: `stage0`, `stage1`, `stage2`. + +### Other Header Commands + +Here is a list of other header commands. This list is not +exhaustive. Header commands can generally be found by browsing the +`TestProps` structure found in [`header.rs`] from the compiletest +source. + +* `run-rustfix` for UI tests, indicates that the test produces + structured suggestions. The test writer should create a `.fixed` + file, which contains the source with the suggestions applied. + When the test is run, compiletest first checks that the correct + lint/warning is generated. Then, it applies the suggestion and + compares against `.fixed` (they must match). Finally, the fixed + source is compiled, and this compilation is required to succeed. + The `.fixed` file can also be generated automatically with the + `--bless` option, discussed [below](#bless). +* `min-gdb-version` specifies the minimum gdb version required for + this test; see also `ignore-gdb-version` +* `min-lldb-version` specifies the minimum lldb version required for + this test +* `rust-lldb` causes the lldb part of the test to only be run if the + lldb in use contains the Rust plugin +* `no-system-llvm` causes the test to be ignored if the system llvm is used +* `min-llvm-version` specifies the minimum llvm version required for + this test +* `min-system-llvm-version` specifies the minimum system llvm version + required for this test; the test is ignored if the system llvm is in + use and it doesn't meet the minimum version. This is useful when an + llvm feature has been backported to rust-llvm +* `ignore-llvm-version` can be used to skip the test when certain LLVM + versions are used. 
This takes one or two arguments; the first + argument is the first version to ignore. If no second argument is + given, all subsequent versions are ignored; otherwise, the second + argument is the last version to ignore. +* `compile-pass` for UI tests, indicates that the test is + supposed to compile, as opposed to the default where the test is + supposed to error out. +* `compile-flags` passes extra command-line args to the compiler, + e.g. `compile-flags -g` which forces debuginfo to be enabled. +* `should-fail` indicates that the test should fail; used for "meta + testing", where we test the compiletest program itself to check that + it will generate errors in appropriate scenarios. This header is + ignored for pretty-printer tests. +* `gate-test-X` where `X` is a feature marks the test as "gate test" + for feature X. Such tests are supposed to ensure that the compiler + errors when usage of a gated feature is attempted without the proper + `#![feature(X)]` tag. Each unstable lang feature is required to + have a gate test. + +[`header.rs`]: https://github.com/rust-lang/rust/tree/master/src/tools/compiletest/src/header.rs + + + +## Error annotations + +Error annotations specify the errors that the compiler is expected to +emit. They are "attached" to the line in source where the error is +located. + +* `~`: Associates the following error level and message with the + current line +* `~|`: Associates the following error level and message with the same + line as the previous comment +* `~^`: Associates the following error level and message with the + previous line. Each caret (`^`) that you add adds a line to this, so + `~^^^^^^^` is seven lines up. + +The error levels that you can have are: + +1. `ERROR` +2. `WARNING` +3. `NOTE` +4. `HELP` and `SUGGESTION`* + +\* **Note**: `SUGGESTION` must follow immediately after `HELP`. + +## Revisions + +Certain classes of tests support "revisions" (as of the time of this +writing, this includes run-pass, compile-fail, run-fail, and +incremental, though incremental tests are somewhat +different). Revisions allow a single test file to be used for multiple +tests. This is done by adding a special header at the top of the file: + +```rust +// revisions: foo bar baz +``` + +This will result in the test being compiled (and tested) three times, +once with `--cfg foo`, once with `--cfg bar`, and once with `--cfg +baz`. You can therefore use `#[cfg(foo)]` etc within the test to tweak +each of these results. + +You can also customize headers and expected error messages to a particular +revision. To do this, add `[foo]` (or `bar`, `baz`, etc) after the `//` +comment, like so: + +```rust +// A flag to pass in only for cfg `foo`: +//[foo]compile-flags: -Z verbose + +#[cfg(foo)] +fn test_foo() { + let x: usize = 32_u32; //[foo]~ ERROR mismatched types +} +``` + +Note that not all headers have meaning when customized to a revision. +For example, the `ignore-test` header (and all "ignore" headers) +currently only apply to the test as a whole, not to particular +revisions. The only headers that are intended to really work when +customized to a revision are error patterns and compiler flags. + + + +## Guide to the UI tests + +The UI tests are intended to capture the compiler's complete output, +so that we can test all aspects of the presentation. They work by +compiling a file (e.g., [`ui/hello_world/main.rs`][hw-main]), +capturing the output, and then applying some normalization (see +below). 
This normalized result is then compared against reference +files named `ui/hello_world/main.stderr` and +`ui/hello_world/main.stdout`. If either of those files doesn't exist, +the output must be empty (that is actually the case for +[this particular test][hw]). If the test run fails, we will print out +the current output, but it is also saved in +`build//test/ui/hello_world/main.stdout` (this path is +printed as part of the test failure message), so you can run `diff` +and so forth. + +[hw-main]: https://github.com/rust-lang/rust/blob/master/src/test/ui/hello_world/main.rs +[hw]: https://github.com/rust-lang/rust/blob/master/src/test/ui/hello_world/ + +### Tests that do not result in compile errors + +By default, a UI test is expected **not to compile** (in which case, +it should contain at least one `//~ ERROR` annotation). However, you +can also make UI tests where compilation is expected to succeed, and +you can even run the resulting program. Just add one of the following +[header commands](#header_commands): + +- `// compile-pass` – compilation should succeed but do + not run the resulting binary +- `// run-pass` – compilation should succeed and we should run the + resulting binary + + + +### Editing and updating the reference files + +If you have changed the compiler's output intentionally, or you are +making a new test, you can pass `--bless` to the test subcommand. E.g. +if some tests in `src/test/ui` are failing, you can run + +```text +./x.py test --stage 1 src/test/ui --bless +``` + +to automatically adjust the `.stderr`, `.stdout` or `.fixed` files of +all tests. Of course you can also target just specific tests with the +`--test-args your_test_name` flag, just like when running the tests. + +### Normalization + +The normalization applied is aimed at eliminating output difference +between platforms, mainly about filenames: + +- the test directory is replaced with `$DIR` +- all backslashes (`\`) are converted to forward slashes (`/`) (for Windows) +- all CR LF newlines are converted to LF + +Sometimes these built-in normalizations are not enough. In such cases, you +may provide custom normalization rules using the header commands, e.g. + +```rust +// normalize-stdout-test: "foo" -> "bar" +// normalize-stderr-32bit: "fn\(\) \(32 bits\)" -> "fn\(\) \($$PTR bits\)" +// normalize-stderr-64bit: "fn\(\) \(64 bits\)" -> "fn\(\) \($$PTR bits\)" +``` + +This tells the test, on 32-bit platforms, whenever the compiler writes +`fn() (32 bits)` to stderr, it should be normalized to read `fn() ($PTR bits)` +instead. Similar for 64-bit. The replacement is performed by regexes using +default regex flavor provided by `regex` crate. + +The corresponding reference file will use the normalized output to test both +32-bit and 64-bit platforms: + +```text +... + | + = note: source type: fn() ($PTR bits) + = note: target type: u16 (16 bits) +... +``` + +Please see [`ui/transmute/main.rs`][mrs] and [`main.stderr`][] for a +concrete usage example. + +[mrs]: https://github.com/rust-lang/rust/blob/master/src/test/ui/transmute/main.rs +[`main.stderr`]: https://github.com/rust-lang/rust/blob/master/src/test/ui/transmute/main.stderr + +Besides `normalize-stderr-32bit` and `-64bit`, one may use any target +information or stage supported by `ignore-X` here as well (e.g. +`normalize-stderr-windows` or simply `normalize-stderr-test` for unconditional +replacement). 
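+
+Putting the pieces above together (an explanatory comment, a header
+command, and an error annotation), a minimal `ui`-style test might look
+like the following sketch; the issue number, flag, and exact error
+message are purely illustrative:
+
+```rust,ignore
+// Regression test for #12345 (illustrative): assigning a `u32` to a
+// `usize` binding must be rejected, not silently coerced.
+
+// compile-flags: -Z verbose
+
+fn main() {
+    let x: usize = 32_u32; //~ ERROR mismatched types
+}
+```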
diff --git a/src/doc/rustc-guide/src/tests/intro.md b/src/doc/rustc-guide/src/tests/intro.md new file mode 100644 index 0000000000..4d509f3a8e --- /dev/null +++ b/src/doc/rustc-guide/src/tests/intro.md @@ -0,0 +1,237 @@ +# The compiler testing framework + +The Rust project runs a wide variety of different tests, orchestrated +by the build system (`x.py test`). The main test harness for testing +the compiler itself is a tool called compiletest (sources in the +[`src/tools/compiletest`]). This section gives a brief overview of how +the testing framework is setup, and then gets into some of the details +on [how to run tests](./running.html#ui) as well as +[how to add new tests](./adding.html). + +[`src/tools/compiletest`]: https://github.com/rust-lang/rust/tree/master/src/tools/compiletest + +## Compiletest test suites + +The compiletest tests are located in the tree in the [`src/test`] +directory. Immediately within you will see a series of subdirectories +(e.g. `ui`, `run-make`, and so forth). Each of those directories is +called a **test suite** – they house a group of tests that are run in +a distinct mode. + +[`src/test`]: https://github.com/rust-lang/rust/tree/master/src/test + +Here is a brief summary of the test suites as of this writing and what +they mean. In some cases, the test suites are linked to parts of the manual +that give more details. + +- [`ui`](./adding.html#ui) – tests that check the exact + stdout/stderr from compilation and/or running the test +- `run-pass` – tests that are expected to compile and execute + successfully (no panics) + - `run-pass-valgrind` – tests that ought to run with valgrind +- `run-fail` – tests that are expected to compile but then panic + during execution +- `compile-fail` – tests that are expected to fail compilation. +- `parse-fail` – tests that are expected to fail to parse +- `pretty` – tests targeting the Rust "pretty printer", which + generates valid Rust code from the AST +- `debuginfo` – tests that run in gdb or lldb and query the debug info +- `codegen` – tests that compile and then test the generated LLVM + code to make sure that the optimizations we want are taking effect. +- `mir-opt` – tests that check parts of the generated MIR to make + sure we are building things correctly or doing the optimizations we + expect. +- `incremental` – tests for incremental compilation, checking that + when certain modifications are performed, we are able to reuse the + results from previous compilations. +- `run-make` – tests that basically just execute a `Makefile`; the + ultimate in flexibility but quite annoying to write. +- `rustdoc` – tests for rustdoc, making sure that the generated files + contain the expected documentation. +- `*-fulldeps` – same as above, but indicates that the test depends + on things other than `libstd` (and hence those things must be built) + +## Other Tests + +The Rust build system handles running tests for various other things, +including: + +- **Tidy** – This is a custom tool used for validating source code + style and formatting conventions, such as rejecting long lines. + There is more information in the + [section on coding conventions](../conventions.html#formatting). + + Example: `./x.py test src/tools/tidy` + +- **Unit tests** – The Rust standard library and many of the Rust packages + include typical Rust `#[test]` unittests. Under the hood, `x.py` will run + `cargo test` on each package to run all the tests. 
+ + Example: `./x.py test src/libstd` + +- **Doc tests** – Example code embedded within Rust documentation is executed + via `rustdoc --test`. Examples: + + `./x.py test src/doc` – Runs `rustdoc --test` for all documentation in + `src/doc`. + + `./x.py test --doc src/libstd` – Runs `rustdoc --test` on the standard + library. + +- **Link checker** – A small tool for verifying `href` links within + documentation. + + Example: `./x.py test src/tools/linkchecker` + +- **Dist check** – This verifies that the source distribution tarball created + by the build system will unpack, build, and run all tests. + + Example: `./x.py test distcheck` + +- **Tool tests** – Packages that are included with Rust have all of their + tests run as well (typically by running `cargo test` within their + directory). This includes things such as cargo, clippy, rustfmt, rls, miri, + bootstrap (testing the Rust build system itself), etc. + +- **Cargo test** – This is a small tool which runs `cargo test` on a few + significant projects (such as `servo`, `ripgrep`, `tokei`, etc.) just to + ensure there aren't any significant regressions. + + Example: `./x.py test src/tools/cargotest` + +## Testing infrastructure + +When a Pull Request is opened on Github, [Travis] will automatically launch a +build that will run all tests on a single configuration (x86-64 linux). In +essence, it runs `./x.py test` after building. + +The integration bot [bors] is used for coordinating merges to the master +branch. When a PR is approved, it goes into a [queue] where merges are tested +one at a time on a wide set of platforms using Travis and [Appveyor] +(currently over 50 different configurations). Most platforms only run the +build steps, some run a restricted set of tests, only a subset run the full +suite of tests (see Rust's [platform tiers]). + +[Travis]: https://travis-ci.org/rust-lang/rust +[bors]: https://github.com/servo/homu +[queue]: https://buildbot2.rust-lang.org/homu/queue/rust +[Appveyor]: https://ci.appveyor.com/project/rust-lang/rust +[platform tiers]: https://forge.rust-lang.org/platform-support.html + +## Testing with Docker images + +The Rust tree includes [Docker] image definitions for the platforms used on +Travis in [src/ci/docker]. The script [src/ci/docker/run.sh] is used to build +the Docker image, run it, build Rust within the image, and run the tests. + +> TODO: What is a typical workflow for testing/debugging on a platform that +> you don't have easy access to? Do people build Docker images and enter them +> to test things out? + +[Docker]: https://www.docker.com/ +[src/ci/docker]: https://github.com/rust-lang/rust/tree/master/src/ci/docker +[src/ci/docker/run.sh]: https://github.com/rust-lang/rust/blob/master/src/ci/docker/run.sh + +## Testing on emulators + +Some platforms are tested via an emulator for architectures that aren't +readily available. There is a set of tools for orchestrating running the +tests within the emulator. Platforms such as `arm-android` and +`arm-unknown-linux-gnueabihf` are set up to automatically run the tests under +emulation on Travis. The following will take a look at how a target's tests +are run under emulation. + +The Docker image for [armhf-gnu] includes [QEMU] to emulate the ARM CPU +architecture. Included in the Rust tree are the tools [remote-test-client] +and [remote-test-server] which are programs for sending test programs and +libraries to the emulator, and running the tests within the emulator, and +reading the results. 
The Docker image is set up to launch +`remote-test-server` and the build tools use `remote-test-client` to +communicate with the server to coordinate running tests (see +[src/bootstrap/test.rs]). + +> TODO: What are the steps for manually running tests within an emulator? +> `./src/ci/docker/run.sh armhf-gnu` will do everything, but takes hours to +> run and doesn't offer much help with interacting within the emulator. +> +> Is there any support for emulating other (non-Android) platforms, such as +> running on an iOS emulator? +> +> Is there anything else interesting that can be said here about running tests +> remotely on real hardware? +> +> It's also unclear to me how the wasm or asm.js tests are run. + +[armhf-gnu]: https://github.com/rust-lang/rust/tree/master/src/ci/docker/armhf-gnu +[QEMU]: https://www.qemu.org/ +[remote-test-client]: https://github.com/rust-lang/rust/tree/master/src/tools/remote-test-client +[remote-test-server]: https://github.com/rust-lang/rust/tree/master/src/tools/remote-test-server +[src/bootstrap/test.rs]: https://github.com/rust-lang/rust/tree/master/src/bootstrap/test.rs + +## Crater + +[Crater](https://github.com/rust-lang-nursery/crater) is a tool for compiling +and running tests for _every_ crate on [crates.io](https://crates.io) (and a +few on GitHub). It is mainly used for checking for extent of breakage when +implementing potentially breaking changes and ensuring lack of breakage by +running beta vs stable compiler versions. + +### When to run Crater + +You should request a crater run if your PR makes large changes to the compiler +or could cause breakage. If you are unsure, feel free to ask your PR's reviewer. + +### Requesting Crater Runs + +The rust team maintains a few machines that can be used for running crater runs +on the changes introduced by a PR. If your PR needs a crater run, leave a +comment for the triage team in the PR thread. Please inform the team whether +you require a "check-only" crater run, a "build only" crater run, or a +"build-and-test" crater run. The difference is primarily in time; the +conservative (if you're not sure) option is to go for the build-and-test run. +If making changes that will only have an effect at compile-time (e.g., +implementing a new trait) then you only need a check run. + +Your PR will be enqueued by the triage team and the results will be posted when +they are ready. Check runs will take around ~3-4 days, with the other two +taking 5-6 days on average. + +While crater is really useful, it is also important to be aware of a few +caveats: + +- Not all code is on crates.io! There is a lot of code in repos on GitHub and + elsewhere. Also, companies may not wish to publish their code. Thus, a + successful crater run is not a magically green light that there will be no + breakage; you still need to be careful. + +- Crater only runs Linux builds on x86_64. Thus, other architectures and + platforms are not tested. Critically, this includes Windows. + +- Many crates are not tested. This could be for a lot of reasons, including + that the crate doesn't compile any more (e.g. used old nightly features), + has broken or flaky tests, requires network access, or other reasons. + +- Before crater can be run, `@bors try` needs to succeed in building artifacts. + This means that if your code doesn't compile, you cannot run crater. + +## Perf runs + +A lot of work is put into improving the performance of the compiler and +preventing performance regressions. 
A "perf run" is used to compare the +performance of the compiler in different configurations for a large collection +of popular crates. Different configurations include "fresh builds", builds +with incremental compilation, etc. + +The result of a perf run is a comparison between two versions of the +compiler (by their commit hashes). + +You should request a perf run if your PR may affect performance, especially +if it can affect performance adversely. + +## Further reading + +The following blog posts may also be of interest: + +- brson's classic ["How Rust is tested"][howtest] + +[howtest]: https://brson.github.io/2017/07/10/how-rust-is-tested diff --git a/src/doc/rustc-guide/src/tests/running.md b/src/doc/rustc-guide/src/tests/running.md new file mode 100644 index 0000000000..f8889c8a4b --- /dev/null +++ b/src/doc/rustc-guide/src/tests/running.md @@ -0,0 +1,121 @@ +# Running tests + +You can run the tests using `x.py`. The most basic command – which +you will almost never want to use! – is as follows: + +```bash +> ./x.py test +``` + +This will build the full stage 2 compiler and then run the whole test +suite. You probably don't want to do this very often, because it takes +a very long time, and anyway bors / travis will do it for you. (Often, +I will run this command in the background after opening a PR that I +think is done, but rarely otherwise. -nmatsakis) + +The test results are cached and previously successful tests are +`ignored` during testing. The stdout/stderr contents as well as a +timestamp file for every test can be found under `build/ARCH/test/`. +To force-rerun a test (e.g. in case the test runner fails to notice +a change) you can simply remove the timestamp file. + +## Running a subset of the test suites + +When working on a specific PR, you will usually want to run a smaller +set of tests, and with a stage 1 build. For example, a good "smoke +test" that can be used after modifying rustc to see if things are +generally working correctly would be the following: + +```bash +> ./x.py test --stage 1 src/test/{ui,compile-fail,run-pass} +``` + +This will run the `ui`, `compile-fail`, and `run-pass` test suites, +and only with the stage 1 build. Of course, the choice of test suites +is somewhat arbitrary, and may not suit the task you are doing. For +example, if you are hacking on debuginfo, you may be better off with +the debuginfo test suite: + +```bash +> ./x.py test --stage 1 src/test/debuginfo +``` + +### Run only the tidy script + +```bash +> ./x.py test src/tools/tidy +``` + +### Run tests on the standard library + +```bash +> ./x.py test src/libstd +``` + +### Run tests on the standard library and run the tidy script + +```bash +> ./x.py test src/libstd src/tools/tidy +``` + +### Run tests on the standard library using a stage 1 compiler + +```bash +> ./x.py test src/libstd --stage 1 +``` + +By listing which test suites you want to run you avoid having to run +tests for components you did not change at all. + +**Warning:** Note that bors only runs the tests with the full stage 2 +build; therefore, while the tests **usually** work fine with stage 1, +there are some limitations. In particular, the stage1 compiler doesn't +work well with procedural macros or custom derive tests. + +## Running an individual test + +Another common thing that people want to do is to run an **individual +test**, often the test they are trying to fix. 
One way to do this is +to invoke `x.py` with the `--test-args` option: + +```bash +> ./x.py test --stage 1 src/test/ui --test-args issue-1234 +``` + +Under the hood, the test runner invokes the standard rust test runner +(the same one you get with `#[test]`), so this command would wind up +filtering for tests that include "issue-1234" in the name. + +## Using incremental compilation + +You can further enable the `--incremental` flag to save additional +time in subsequent rebuilds: + +```bash +> ./x.py test --stage 1 src/test/ui --incremental --test-args issue-1234 +``` + +If you don't want to include the flag with every command, you can +enable it in the `config.toml`, too: + +```toml +# Whether to always use incremental compilation when building rustc +incremental = true +``` + +Note that incremental compilation will use more disk space than usual. +If disk space is a concern for you, you might want to check the size +of the `build` directory from time to time. + +## Running tests manually + +Sometimes it's easier and faster to just run the test by hand. Most tests are +just `rs` files, so you can do something like + +```bash +> rustc +stage1 src/test/ui/issue-1234.rs +``` + +This is much faster, but doesn't always work. For example, some tests +include directives that specify specific compiler flags, or which rely +on other crates, and they may not run the same without those options. diff --git a/src/doc/rustc-guide/src/the-parser.md b/src/doc/rustc-guide/src/the-parser.md new file mode 100644 index 0000000000..ac902d9154 --- /dev/null +++ b/src/doc/rustc-guide/src/the-parser.md @@ -0,0 +1,44 @@ +# The Parser + +The parser is responsible for converting raw Rust source code into a structured +form which is easier for the compiler to work with, usually called an [*Abstract +Syntax Tree*][ast]. An AST mirrors the structure of a Rust program in memory, +using a `Span` to link a particular AST node back to its source text. + +The bulk of the parser lives in the [libsyntax] crate. + +Like most parsers, the parsing process is composed of two main steps, + +- lexical analysis – turn a stream of characters into a stream of token trees +- parsing – turn the token trees into an AST + +The `syntax` crate contains several main players, + +- a [`SourceMap`] for mapping AST nodes to their source code +- the [ast module] contains types corresponding to each AST node +- a [`StringReader`] for lexing source code into tokens +- the [parser module] and [`Parser`] struct are in charge of actually parsing + tokens into AST nodes, +- and a [visit module] for walking the AST and inspecting or mutating the AST + nodes. + +The main entrypoint to the parser is via the various `parse_*` functions in the +[parser module]. They let you do things like turn a [`SourceFile`][sourcefile] +(e.g. the source in a single file) into a token stream, create a parser from +the token stream, and then execute the parser to get a `Crate` (the root AST +node). + +To minimise the amount of copying that is done, both the `StringReader` and +`Parser` have lifetimes which bind them to the parent `ParseSess`. This contains +all the information needed while parsing, as well as the `SourceMap` itself. 
+ +[libsyntax]: https://doc.rust-lang.org/nightly/nightly-rustc/syntax/index.html +[rustc_errors]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_errors/index.html +[ast]: https://en.wikipedia.org/wiki/Abstract_syntax_tree +[`SourceMap`]: https://doc.rust-lang.org/nightly/nightly-rustc/syntax/source_map/struct.SourceMap.html +[ast module]: https://doc.rust-lang.org/nightly/nightly-rustc/syntax/ast/index.html +[parser module]: https://doc.rust-lang.org/nightly/nightly-rustc/syntax/parse/index.html +[`Parser`]: https://doc.rust-lang.org/nightly/nightly-rustc/syntax/parse/parser/struct.Parser.html +[`StringReader`]: https://doc.rust-lang.org/nightly/nightly-rustc/syntax/parse/lexer/struct.StringReader.html +[visit module]: https://doc.rust-lang.org/nightly/nightly-rustc/syntax/visit/index.html +[sourcefile]: https://doc.rust-lang.org/nightly/nightly-rustc/syntax/source_map/struct.SourceFile.html diff --git a/src/doc/rustc-guide/src/traits/associated-types.md b/src/doc/rustc-guide/src/traits/associated-types.md new file mode 100644 index 0000000000..d35fb71e17 --- /dev/null +++ b/src/doc/rustc-guide/src/traits/associated-types.md @@ -0,0 +1,168 @@ +# Equality and associated types + +This section covers how the trait system handles equality between +associated types. The full system consists of several moving parts, +which we will introduce one by one: + +- Projection and the `Normalize` predicate +- Placeholder associated type projections +- The `ProjectionEq` predicate +- Integration with unification + +## Associated type projection and normalization + +When a trait defines an associated type (e.g., +[the `Item` type in the `IntoIterator` trait][intoiter-item]), that +type can be referenced by the user using an **associated type +projection** like ` as IntoIterator>::Item`. + +> Often, people will use the shorthand syntax `T::Item`. Presently, that +> syntax is expanded during ["type collection"](../type-checking.html) into the +> explicit form, though that is something we may want to change in the future. + +[intoiter-item]: https://doc.rust-lang.org/nightly/core/iter/trait.IntoIterator.html#associatedtype.Item + + + +In some cases, associated type projections can be **normalized** – +that is, simplified – based on the types given in an impl. So, to +continue with our example, the impl of `IntoIterator` for `Option` +declares (among other things) that `Item = T`: + +```rust,ignore +impl IntoIterator for Option { + type Item = T; + ... +} +``` + +This means we can normalize the projection ` as +IntoIterator>::Item` to just `u32`. + +In this case, the projection was a "monomorphic" one – that is, it +did not have any type parameters. Monomorphic projections are special +because they can **always** be fully normalized. + +Often, we can normalize other associated type projections as well. For +example, ` as IntoIterator>::Item`, where `?T` is an inference +variable, can be normalized to just `?T`. + +In our logic, normalization is defined by a predicate +`Normalize`. The `Normalize` clauses arise only from +impls. For example, the `impl` of `IntoIterator` for `Option` that +we saw above would be lowered to a program clause like so: + +```text +forall { + Normalize( as IntoIterator>::Item -> T) :- + Implemented(Option: IntoIterator) +} +``` + +where in this case, the one `Implemented` condition is always true. + +> Since we do not permit quantification over traits, this is really more like +> a family of program clauses, one for each associated type. 
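+
+As a small, compilable illustration of that monomorphic case (using only the
+standard library), the projection `<Option<u32> as IntoIterator>::Item`
+normalizes to `u32`, so a function can use the projection and the normalized
+type interchangeably:
+
+```rust
+// After normalization this is just `fn take_item(x: u32) -> u32`.
+fn take_item(x: <Option<u32> as IntoIterator>::Item) -> u32 {
+    x
+}
+
+fn main() {
+    assert_eq!(take_item(5), 5);
+}
+```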
+ +We could apply that rule to normalize either of the examples that +we've seen so far. + +## Placeholder associated types + +Sometimes however we want to work with associated types that cannot be +normalized. For example, consider this function: + +```rust,ignore +fn foo(...) { ... } +``` + +In this context, how would we normalize the type `T::Item`? + +Without knowing what `T` is, we can't really do so. To represent this case, +we introduce a type called a **placeholder associated type projection**. This +is written like so: `(IntoIterator::Item)`. + +You may note that it looks a lot like a regular type (e.g., `Option`), +except that the "name" of the type is `(IntoIterator::Item)`. This is not an +accident: placeholder associated type projections work just like ordinary +types like `Vec` when it comes to unification. That is, they are only +considered equal if (a) they are both references to the same associated type, +like `IntoIterator::Item` and (b) their type arguments are equal. + +Placeholder associated types are never written directly by the user. +They are used internally by the trait system only, as we will see +shortly. + +In rustc, they correspond to the `TyKind::UnnormalizedProjectionTy` enum +variant, declared in [`librustc/ty/sty.rs`][sty]. In chalk, we use an +`ApplicationTy` with a name living in a special namespace dedicated to +placeholder associated types (see the `TypeName` enum declared in +[`chalk-ir/src/lib.rs`][chalk_type_name]). + +[sty]: https://github.com/rust-lang/rust/blob/master/src/librustc/ty/sty.rs +[chalk_type_name]: https://github.com/rust-lang-nursery/chalk/blob/master/chalk-ir/src/lib.rs + +## Projection equality + +So far we have seen two ways to answer the question of "When can we +consider an associated type projection equal to another type?": + +- the `Normalize` predicate could be used to transform projections when we + knew which impl applied; +- **placeholder** associated types can be used when we don't. This is also + known as **lazy normalization**. + +We now introduce the `ProjectionEq` predicate to bring those two cases +together. The `ProjectionEq` predicate looks like so: + +```text +ProjectionEq(::Item = U) +``` + +and we will see that it can be proven *either* via normalization or +via the placeholder type. As part of lowering an associated type declaration from +some trait, we create two program clauses for `ProjectionEq`: + +```text +forall { + ProjectionEq(::Item = U) :- + Normalize(::Item -> U) +} + +forall { + ProjectionEq(::Item = (IntoIterator::Item)) +} +``` + +These are the only two `ProjectionEq` program clauses we ever make for +any given associated item. + +## Integration with unification + +Now we are ready to discuss how associated type equality integrates +with unification. As described in the +[type inference](../type-inference.html) section, unification is +basically a procedure with a signature like this: + +```text +Unify(A, B) = Result<(Subgoals, RegionConstraints), NoSolution> +``` + +In other words, we try to unify two things A and B. That procedure +might just fail, in which case we get back `Err(NoSolution)`. This +would happen, for example, if we tried to unify `u32` and `i32`. + +The key point is that, on success, unification can also give back to +us a set of subgoals that still remain to be proven. (It can also give +back region constraints, but those are not relevant here). 
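+
+To see where such projection goals come from in practice, consider this small
+compilable example; the `ProjectionEq` goal written in the comment is an
+informal rendering of the obligation, not literal compiler output:
+
+```rust
+fn first_or_zero<I>(mut it: I) -> u32
+where
+    I: Iterator<Item = u32>,
+{
+    // The value produced here has type `<I as Iterator>::Item`. Equating it
+    // with the declared return type yields, roughly, the subgoal
+    // `ProjectionEq(<I as Iterator>::Item = u32)`, which is discharged by the
+    // `Item = u32` bound in scope.
+    it.next().unwrap_or(0)
+}
+
+fn main() {
+    // The element type of the vector is inferred to be `u32` via that bound.
+    assert_eq!(first_or_zero(vec![7, 8].into_iter()), 7);
+}
+```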
+ +Whenever unification encounters a non-placeholder associated type +projection P being equated with some other type T, it always succeeds, +but it produces a subgoal `ProjectionEq(P = T)` that is propagated +back up. Thus it falls to the ordinary workings of the trait system +to process that constraint. + +> If we unify two projections P1 and P2, then unification produces a +> variable X and asks us to prove that `ProjectionEq(P1 = X)` and +> `ProjectionEq(P2 = X)`. (That used to be needed in an older system to +> prevent cycles; I rather doubt it still is. -nmatsakis) diff --git a/src/doc/rustc-guide/src/traits/bibliography.md b/src/doc/rustc-guide/src/traits/bibliography.md new file mode 100644 index 0000000000..b8f345d9d2 --- /dev/null +++ b/src/doc/rustc-guide/src/traits/bibliography.md @@ -0,0 +1,29 @@ +# Bibliography + +If you'd like to read more background material, here are some +recommended texts and papers: + +[Programming with Higher-order Logic][phl], by Dale Miller and Gopalan +Nadathur, covers the key concepts of Lambda prolog. Although it's a +slim little volume, it's the kind of book where you learn something +new every time you open it. + +[phl]: https://www.amazon.com/Programming-Higher-Order-Logic-Dale-Miller/dp/052187940X + + + +["A proof procedure for the logic of Hereditary Harrop formulas"][pphhf], +by Gopalan Nadathur. This paper covers the basics of universes, +environments, and Lambda Prolog-style proof search. Quite readable. + +[pphhf]: https://dl.acm.org/citation.cfm?id=868380 + + + +["A new formulation of tabled resolution with delay"][nftrd], by +[Theresa Swift]. This paper gives a kind of abstract treatment of the +SLG formulation that is the basis for our on-demand solver. + +[nftrd]: https://dl.acm.org/citation.cfm?id=651202 +[ts]: http://www3.cs.stonybrook.edu/~tswift/ +[Theresa Swift]: http://www3.cs.stonybrook.edu/~tswift/ diff --git a/src/doc/rustc-guide/src/traits/caching.md b/src/doc/rustc-guide/src/traits/caching.md new file mode 100644 index 0000000000..c963aafc4a --- /dev/null +++ b/src/doc/rustc-guide/src/traits/caching.md @@ -0,0 +1,67 @@ +# Caching and subtle considerations therewith + +In general, we attempt to cache the results of trait selection. This +is a somewhat complex process. Part of the reason for this is that we +want to be able to cache results even when all the types in the trait +reference are not fully known. In that case, it may happen that the +trait selection process is also influencing type variables, so we have +to be able to not only cache the *result* of the selection process, +but *replay* its effects on the type variables. + +## An example + +The high-level idea of how the cache works is that we first replace +all unbound inference variables with placeholder versions. Therefore, +if we had a trait reference `usize : Foo<$t>`, where `$t` is an unbound +inference variable, we might replace it with `usize : Foo<$0>`, where +`$0` is a placeholder type. We would then look this up in the cache. + +If we found a hit, the hit would tell us the immediate next step to +take in the selection process (e.g. apply impl #22, or apply where +clause `X : Foo`). + +On the other hand, if there is no hit, we need to go through the [selection +process] from scratch. Suppose, we come to the conclusion that the only +possible impl is this one, with def-id 22: + +[selection process]: ./resolution.html#selection + +```rust,ignore +impl Foo for usize { ... 
} // Impl #22 +``` + +We would then record in the cache `usize : Foo<$0> => ImplCandidate(22)`. Next +we would [confirm] `ImplCandidate(22)`, which would (as a side-effect) unify +`$t` with `isize`. + +[confirm]: ./resolution.html#confirmation + +Now, at some later time, we might come along and see a `usize : +Foo<$u>`. When replaced with a placeholder, this would yield `usize : Foo<$0>`, just as +before, and hence the cache lookup would succeed, yielding +`ImplCandidate(22)`. We would confirm `ImplCandidate(22)` which would +(as a side-effect) unify `$u` with `isize`. + +## Where clauses and the local vs global cache + +One subtle interaction is that the results of trait lookup will vary +depending on what where clauses are in scope. Therefore, we actually +have *two* caches, a local and a global cache. The local cache is +attached to the [`ParamEnv`], and the global cache attached to the +[`tcx`]. We use the local cache whenever the result might depend on the +where clauses that are in scope. The determination of which cache to +use is done by the method `pick_candidate_cache` in `select.rs`. At +the moment, we use a very simple, conservative rule: if there are any +where-clauses in scope, then we use the local cache. We used to try +and draw finer-grained distinctions, but that led to a serious of +annoying and weird bugs like [#22019] and [#18290]. This simple rule seems +to be pretty clearly safe and also still retains a very high hit rate +(~95% when compiling rustc). + +**TODO**: it looks like `pick_candidate_cache` no longer exists. In +general, is this section still accurate at all? + +[`ParamEnv`]: ../param_env.html +[`tcx`]: ../ty.html +[#18290]: https://github.com/rust-lang/rust/issues/18290 +[#22019]: https://github.com/rust-lang/rust/issues/22019 diff --git a/src/doc/rustc-guide/src/traits/canonical-queries.md b/src/doc/rustc-guide/src/traits/canonical-queries.md new file mode 100644 index 0000000000..cbf7d880d0 --- /dev/null +++ b/src/doc/rustc-guide/src/traits/canonical-queries.md @@ -0,0 +1,252 @@ +# Canonical queries + +The "start" of the trait system is the **canonical query** (these are +both queries in the more general sense of the word – something you +would like to know the answer to – and in the +[rustc-specific sense](../query.html)). The idea is that the type +checker or other parts of the system, may in the course of doing their +thing want to know whether some trait is implemented for some type +(e.g., is `u32: Debug` true?). Or they may want to +[normalize some associated type](./associated-types.html). + +This section covers queries at a fairly high level of abstraction. The +subsections look a bit more closely at how these ideas are implemented +in rustc. + +## The traditional, interactive Prolog query + +In a traditional Prolog system, when you start a query, the solver +will run off and start supplying you with every possible answer it can +find. So given something like this: + +```text +?- Vec: AsRef +``` + +The solver might answer: + +```text +Vec: AsRef<[i32]> + continue? (y/n) +``` + +This `continue` bit is interesting. The idea in Prolog is that the +solver is finding **all possible** instantiations of your query that +are true. In this case, if we instantiate `?U = [i32]`, then the query +is true (note that a traditional Prolog interface does not, directly, +tell us a value for `?U`, but we can infer one by unifying the +response with our original query – Rust's solver gives back a +substitution instead). 
If we were to hit `y`, the solver might then +give us another possible answer: + +```text +Vec: AsRef> + continue? (y/n) +``` + +This answer derives from the fact that there is a reflexive impl +(`impl AsRef for T`) for `AsRef`. If were to hit `y` again, +then we might get back a negative response: + +```text +no +``` + +Naturally, in some cases, there may be no possible answers, and hence +the solver will just give me back `no` right away: + +```text +?- Box: Copy + no +``` + +In some cases, there might be an infinite number of responses. So for +example if I gave this query, and I kept hitting `y`, then the solver +would never stop giving me back answers: + +```text +?- Vec: Clone + Vec: Clone + continue? (y/n) + Vec>: Clone + continue? (y/n) + Vec>>: Clone + continue? (y/n) + Vec>>>: Clone + continue? (y/n) +``` + +As you can imagine, the solver will gleefully keep adding another +layer of `Box` until we ask it to stop, or it runs out of memory. + +Another interesting thing is that queries might still have variables +in them. For example: + +```text +?- Rc: Clone +``` + +might produce the answer: + +```text +Rc: Clone + continue? (y/n) +``` + +After all, `Rc` is true **no matter what type `?T` is**. + + + +## A trait query in rustc + +The trait queries in rustc work somewhat differently. Instead of +trying to enumerate **all possible** answers for you, they are looking +for an **unambiguous** answer. In particular, when they tell you the +value for a type variable, that means that this is the **only possible +instantiation** that you could use, given the current set of impls and +where-clauses, that would be provable. (Internally within the solver, +though, they can potentially enumerate all possible answers. See +[the description of the SLG solver](./slg.html) for details.) + +The response to a trait query in rustc is typically a +`Result, NoSolution>` (where the `T` will vary a bit +depending on the query itself). The `Err(NoSolution)` case indicates +that the query was false and had no answers (e.g., `Box: Copy`). +Otherwise, the `QueryResult` gives back information about the possible answer(s) +we did find. It consists of four parts: + +- **Certainty:** tells you how sure we are of this answer. It can have two + values: + - `Proven` means that the result is known to be true. + - This might be the result for trying to prove `Vec: Clone`, + say, or `Rc: Clone`. + - `Ambiguous` means that there were things we could not yet prove to + be either true *or* false, typically because more type information + was needed. (We'll see an example shortly.) + - This might be the result for trying to prove `Vec: Clone`. +- **Var values:** Values for each of the unbound inference variables + (like `?T`) that appeared in your original query. (Remember that in Prolog, + we had to infer these.) + - As we'll see in the example below, we can get back var values even + for `Ambiguous` cases. +- **Region constraints:** these are relations that must hold between + the lifetimes that you supplied as inputs. We'll ignore these here, + but see the + [section on handling regions in traits](./regions.html) for + more details. +- **Value:** The query result also comes with a value of type `T`. For + some specialized queries – like normalizing associated types – + this is used to carry back an extra result, but it's often just + `()`. + +### Examples + +Let's work through an example query to see what all the parts mean. +Consider [the `Borrow` trait][borrow]. 
This trait has a number of +impls; among them, there are these two (for clarity, I've written the +`Sized` bounds explicitly): + +[borrow]: https://doc.rust-lang.org/std/borrow/trait.Borrow.html + +```rust,ignore +impl Borrow for T where T: ?Sized +impl Borrow<[T]> for Vec where T: Sized +``` + +**Example 1.** Imagine we are type-checking this (rather artificial) +bit of code: + +```rust,ignore +fn foo(a: A, vec_b: Option) where A: Borrow { } + +fn main() { + let mut t: Vec<_> = vec![]; // Type: Vec + let mut u: Option<_> = None; // Type: Option + foo(t, u); // Example 1: requires `Vec: Borrow` + ... +} +``` + +As the comments indicate, we first create two variables `t` and `u`; +`t` is an empty vector and `u` is a `None` option. Both of these +variables have unbound inference variables in their type: `?T` +represents the elements in the vector `t` and `?U` represents the +value stored in the option `u`. Next, we invoke `foo`; comparing the +signature of `foo` to its arguments, we wind up with `A = Vec` and +`B = ?U`.Therefore, the where clause on `foo` requires that `Vec: +Borrow`. This is thus our first example trait query. + +There are many possible solutions to the query `Vec: Borrow`; +for example: + +- `?U = Vec`, +- `?U = [?T]`, +- `?T = u32, ?U = [u32]` +- and so forth. + +Therefore, the result we get back would be as follows (I'm going to +ignore region constraints and the "value"): + +- Certainty: `Ambiguous` – we're not sure yet if this holds +- Var values: `[?T = ?T, ?U = ?U]` – we learned nothing about the values of + the variables + +In short, the query result says that it is too soon to say much about +whether this trait is proven. During type-checking, this is not an +immediate error: instead, the type checker would hold on to this +requirement (`Vec: Borrow`) and wait. As we'll see in the next +example, it may happen that `?T` and `?U` wind up constrained from +other sources, in which case we can try the trait query again. + +**Example 2.** We can now extend our previous example a bit, +and assign a value to `u`: + +```rust,ignore +fn foo(a: A, vec_b: Option) where A: Borrow { } + +fn main() { + // What we saw before: + let mut t: Vec<_> = vec![]; // Type: Vec + let mut u: Option<_> = None; // Type: Option + foo(t, u); // `Vec: Borrow` => ambiguous + + // New stuff: + u = Some(vec![]); // ?U = Vec +} +``` + +As a result of this assignment, the type of `u` is forced to be +`Option>`, where `?V` represents the element type of the +vector. This in turn implies that `?U` is [unified] to `Vec`. + +[unified]: ../type-checking.html + +Let's suppose that the type checker decides to revisit the +"as-yet-unproven" trait obligation we saw before, `Vec: +Borrow`. `?U` is no longer an unbound inference variable; it now +has a value, `Vec`. So, if we "refresh" the query with that value, we get: + +```text +Vec: Borrow> +``` + +This time, there is only one impl that applies, the reflexive impl: + +```text +impl Borrow for T where T: ?Sized +``` + +Therefore, the trait checker will answer: + +- Certainty: `Proven` +- Var values: `[?T = ?T, ?V = ?T]` + +Here, it is saying that we have indeed proven that the obligation +holds, and we also know that `?T` and `?V` are the same type (but we +don't know what that type is yet!). + +(In fact, as the function ends here, the type checker would give an +error at this point, since the element types of `t` and `u` are still +not yet known, even though they are known to be the same.) 
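+
+As a compilable postscript to these examples (a sketch with the element types
+pinned down by hand so that nothing is left ambiguous), the final trait
+obligation is proven via the reflexive `Borrow` impl:
+
+```rust
+use std::borrow::Borrow;
+
+// The same shape as the `foo` used above, written with explicit parameters.
+fn foo<A, B>(_a: A, _vec_b: Option<B>) where A: Borrow<B> {}
+
+fn main() {
+    let t: Vec<u32> = vec![];       // the element type is now fixed to u32
+    let u: Option<Vec<u32>> = None; // and the option's payload to Vec<u32>
+    foo(t, u); // `Vec<u32>: Borrow<Vec<u32>>` holds via the reflexive impl
+}
+```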
+ + diff --git a/src/doc/rustc-guide/src/traits/canonicalization.md b/src/doc/rustc-guide/src/traits/canonicalization.md new file mode 100644 index 0000000000..ca81d6fd16 --- /dev/null +++ b/src/doc/rustc-guide/src/traits/canonicalization.md @@ -0,0 +1,256 @@ +# Canonicalization + +Canonicalization is the process of **isolating** an inference value +from its context. It is a key part of implementing +[canonical queries][cq], and you may wish to read the parent chapter +to get more context. + +Canonicalization is really based on a very simple concept: every +[inference variable](../type-inference.html#vars) is always in one of +two states: either it is **unbound**, in which case we don't know yet +what type it is, or it is **bound**, in which case we do. So to +isolate some data-structure T that contains types/regions from its +environment, we just walk down and find the unbound variables that +appear in T; those variables get replaced with "canonical variables", +starting from zero and numbered in a fixed order (left to right, for +the most part, but really it doesn't matter as long as it is +consistent). + +[cq]: ./canonical-queries.html + +So, for example, if we have the type `X = (?T, ?U)`, where `?T` and +`?U` are distinct, unbound inference variables, then the canonical +form of `X` would be `(?0, ?1)`, where `?0` and `?1` represent these +**canonical placeholders**. Note that the type `Y = (?U, ?T)` also +canonicalizes to `(?0, ?1)`. But the type `Z = (?T, ?T)` would +canonicalize to `(?0, ?0)` (as would `(?U, ?U)`). In other words, the +exact identity of the inference variables is not important – unless +they are repeated. + +We use this to improve caching as well as to detect cycles and other +things during trait resolution. Roughly speaking, the idea is that if +two trait queries have the same canonical form, then they will get +the same answer. That answer will be expressed in terms of the +canonical variables (`?0`, `?1`), which we can then map back to the +original variables (`?T`, `?U`). + +## Canonicalizing the query + +To see how it works, imagine that we are asking to solve the following +trait query: `?A: Foo<'static, ?B>`, where `?A` and `?B` are unbound. +This query contains two unbound variables, but it also contains the +lifetime `'static`. The trait system generally ignores all lifetimes +and treats them equally, so when canonicalizing, we will *also* +replace any [free lifetime](../appendix/background.html#free-vs-bound) with a +canonical variable (Note that `'static` is actually a _free_ lifetime +variable here. We are not considering it in the typing context of the whole +program but only in the context of this trait reference. Mathematically, we +are not quantifying over the whole program, but only this obligation). +Therefore, we get the following result: + +```text +?0: Foo<'?1, ?2> +``` + +Sometimes we write this differently, like so: + +```text +for { ?0: Foo<'?1, ?2> } +``` + +This `for<>` gives some information about each of the canonical +variables within. In this case, each `T` indicates a type variable, +so `?0` and `?2` are types; the `L` indicates a lifetime variable, so +`?1` is a lifetime. The `canonicalize` method *also* gives back a +`CanonicalVarValues` array OV with the "original values" for each +canonicalized variable: + +```text +[?A, 'static, ?B] +``` + +We'll need this vector OV later, when we process the query response. + +## Executing the query + +Once we've constructed the canonical query, we can try to solve it. 
+To do so, we will wind up creating a fresh inference context and +**instantiating** the canonical query in that context. The idea is that +we create a substitution S from the canonical form containing a fresh +inference variable (of suitable kind) for each canonical variable. +So, for our example query: + +```text +for { ?0: Foo<'?1, ?2> } +``` + +the substitution S might be: + +```text +S = [?A, '?B, ?C] +``` + +We can then replace the bound canonical variables (`?0`, etc) with +these inference variables, yielding the following fully instantiated +query: + +```text +?A: Foo<'?B, ?C> +``` + +Remember that substitution S though! We're going to need it later. + +OK, now that we have a fresh inference context and an instantiated +query, we can go ahead and try to solve it. The trait solver itself is +explained in more detail in [another section](./slg.html), but +suffice to say that it will compute a [certainty value][cqqr] (`Proven` or +`Ambiguous`) and have side-effects on the inference variables we've +created. For example, if there were only one impl of `Foo`, like so: + +[cqqr]: ./canonical-queries.html#query-response + +```rust,ignore +impl<'a, X> Foo<'a, X> for Vec +where X: 'a +{ ... } +``` + +then we might wind up with a certainty value of `Proven`, as well as +creating fresh inference variables `'?D` and `?E` (to represent the +parameters on the impl) and unifying as follows: + +- `'?B = '?D` +- `?A = Vec` +- `?C = ?E` + +We would also accumulate the region constraint `?E: '?D`, due to the +where clause. + +In order to create our final query result, we have to "lift" these +values out of the query's inference context and into something that +can be reapplied in our original inference context. We do that by +**re-applying canonicalization**, but to the **query result**. + +## Canonicalizing the query result + +As discussed in [the parent section][cqqr], most trait queries wind up +with a result that brings together a "certainty value" `certainty`, a +result substitution `var_values`, and some region constraints. To +create this, we wind up re-using the substitution S that we created +when first instantiating our query. To refresh your memory, we had a query + +```text +for { ?0: Foo<'?1, ?2> } +``` + +for which we made a substutition S: + +```text +S = [?A, '?B, ?C] +``` + +We then did some work which unified some of those variables with other things. +If we "refresh" S with the latest results, we get: + +```text +S = [Vec, '?D, ?E] +``` + +These are precisely the new values for the three input variables from +our original query. Note though that they include some new variables +(like `?E`). We can make those go away by canonicalizing again! We don't +just canonicalize S, though, we canonicalize the whole query response QR: + +```text +QR = { + certainty: Proven, // or whatever + var_values: [Vec, '?D, ?E] // this is S + region_constraints: [?E: '?D], // from the impl + value: (), // for our purposes, just (), but + // in some cases this might have + // a type or other info +} +``` + +The result would be as follows: + +```text +Canonical(QR) = for { + certainty: Proven, + var_values: [Vec, '?1, ?2] + region_constraints: [?2: '?1], + value: (), +} +``` + +(One subtle point: when we canonicalize the query **result**, we do not +use any special treatment for free lifetimes. Note that both +references to `'?D`, for example, were converted into the same +canonical variable (`?1`). 
This is in contrast to the original query, +where we canonicalized every free lifetime into a fresh canonical +variable.) + +Now, this result must be reapplied in each context where needed. + +## Processing the canonicalized query result + +In the previous section we produced a canonical query result. We now have +to apply that result in our original context. If you recall, way back in the +beginning, we were trying to prove this query: + +```text +?A: Foo<'static, ?B> +``` + +We canonicalized that into this: + +```text +for { ?0: Foo<'?1, ?2> } +``` + +and now we got back a canonical response: + +```text +for { + certainty: Proven, + var_values: [Vec, '?1, ?2] + region_constraints: [?2: '?1], + value: (), +} +``` + +We now want to apply that response to our context. Conceptually, how +we do that is to (a) instantiate each of the canonical variables in +the result with a fresh inference variable, (b) unify the values in +the result with the original values, and then (c) record the region +constraints for later. Doing step (a) would yield a result of + +```text +{ + certainty: Proven, + var_values: [Vec, '?D, ?C] + ^^ ^^^ fresh inference variables + region_constraints: [?C: '?D], + value: (), +} +``` + +Step (b) would then unify: + +```text +?A with Vec +'static with '?D +?B with ?C +``` + +And finally the region constraint of `?C: 'static` would be recorded +for later verification. + +(What we *actually* do is a mildly optimized variant of that: Rather +than eagerly instantiating all of the canonical values in the result +with variables, we instead walk the vector of values, looking for +cases where the value is just a canonical variable. In our example, +`values[2]` is `?C`, so that means we can deduce that `?C := ?B and +`'?D := 'static`. This gives us a partial set of values. Anything for +which we do not find a value, we create an inference variable.) + diff --git a/src/doc/rustc-guide/src/traits/chalk-overview.md b/src/doc/rustc-guide/src/traits/chalk-overview.md new file mode 100644 index 0000000000..59572479d0 --- /dev/null +++ b/src/doc/rustc-guide/src/traits/chalk-overview.md @@ -0,0 +1,257 @@ +# An Overview of Chalk + +> Chalk is under heavy development, so if any of these links are broken or if +> any of the information is inconsistent with the code or outdated, please +> [open an issue][rustc-issues] so we can fix it. If you are able to fix the +> issue yourself, we would love your contribution! + +[Chalk][chalk] recasts Rust's trait system explicitly in terms of logic +programming by "lowering" Rust code into a kind of logic program we can then +execute queries against. (See [*Lowering to Logic*][lowering-to-logic] and +[*Lowering Rules*][lowering-rules]) Its goal is to be an executable, highly +readable specification of the Rust trait system. + +There are many expected benefits from this work. It will consolidate our +existing, somewhat ad-hoc implementation into something far more principled and +expressive, which should behave better in corner cases, and be much easier to +extend. + +## Chalk Structure + +Chalk has two main "products". The first of these is the +[`chalk_engine`][chalk_engine] crate, which defines the core [SLG +solver][slg]. This is the part rustc uses. + +The rest of chalk can be considered an elaborate testing harness. Chalk is +capable of parsing Rust-like "programs", lowering them to logic, and +performing queries on them. + +Here's a sample session in the chalk repl, chalki. After feeding it our +program, we perform some queries on it. 
+ +```rust,ignore +?- program +Enter a program; press Ctrl-D when finished +| struct Foo { } +| struct Bar { } +| struct Vec { } +| trait Clone { } +| impl Clone for Vec where T: Clone { } +| impl Clone for Foo { } + +?- Vec: Clone +Unique; substitution [], lifetime constraints [] + +?- Vec: Clone +No possible solution. + +?- exists { Vec: Clone } +Ambiguous; no inference guidance +``` + +You can see more examples of programs and queries in the [unit +tests][chalk-test-example]. + +Next we'll go through each stage required to produce the output above. + +### Parsing ([chalk_parse]) + +Chalk is designed to be incorporated with the Rust compiler, so the syntax and +concepts it deals with heavily borrow from Rust. It is convenient for the sake +of testing to be able to run chalk on its own, so chalk includes a parser for a +Rust-like syntax. This syntax is orthogonal to the Rust AST and grammar. It is +not intended to look exactly like it or support the exact same syntax. + +The parser takes that syntax and produces an [Abstract Syntax Tree (AST)][ast]. +You can find the [complete definition of the AST][chalk-ast] in the source code. + +The syntax contains things from Rust that we know and love, for example: traits, +impls, and struct definitions. Parsing is often the first "phase" of +transformation that a program goes through in order to become a format that +chalk can understand. + +### Rust Intermediate Representation ([rust_ir]) + +After getting the AST we convert it to a more convenient intermediate +representation called [`rust_ir`][rust_ir]. This is sort of analogous to the +[HIR] in Rust. The process of converting to IR is called *lowering*. + +The [`rust_ir::Program`][rust_ir-program] struct contains some "rust things" +but indexed and accessible in a different way. For example, if you have a +type like `Foo`, we would represent `Foo` as a string in the AST but in +`rust_ir::Program`, we use numeric indices (`ItemId`). + +The [IR source code][ir-code] contains the complete definition. + +### Chalk Intermediate Representation ([chalk_ir]) + +Once we have Rust IR it is time to convert it to "program clauses". A +[`ProgramClause`] is essentially one of the following: + +* A [clause] of the form `consequence :- conditions` where `:-` is read as + "if" and `conditions = cond1 && cond2 && ...` +* A universally quantified clause of the form + `forall { consequence :- conditions }` + * `forall { ... }` is used to represent [universal quantification]. See the + section on [Lowering to logic][lowering-forall] for more information. + * A key thing to note about `forall` is that we don't allow you to "quantify" + over traits, only types and regions (lifetimes). That is, you can't make a + rule like `forall { u32: Trait }` which would say "`u32` implements + all traits". You can however say `forall { T: Trait }` meaning "`Trait` + is implemented by all types". + * `forall { ... }` is represented in the code using the [`Binders` + struct][binders-struct]. + +*See also: [Goals and Clauses][goals-and-clauses]* + +This is where we encode the rules of the trait system into logic. For +example, if we have the following Rust: + +```rust,ignore +impl Clone for Vec {} +``` + +We generate the following program clause: + +```rust,ignore +forall { (Vec: Clone) :- (T: Clone) } +``` + +This rule dictates that `Vec: Clone` is only satisfied if `T: Clone` is also +satisfied (i.e. "provable"). 
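+
+The effect of that clause can also be observed from ordinary Rust code (a
+small sanity check in plain Rust, not chalk itself): `Vec<T>` is `Clone`
+exactly when `T` is.
+
+```rust
+#[derive(Clone)]
+struct Cloneable;
+
+#[allow(dead_code)]
+struct NotCloneable;
+
+fn assert_clone<T: Clone>() {}
+
+fn main() {
+    assert_clone::<Vec<Cloneable>>(); // ok: `Cloneable: Clone` holds
+    // assert_clone::<Vec<NotCloneable>>(); // error: `NotCloneable: Clone` does not hold
+}
+```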
+ +Similar to [`rust_ir::Program`][rust_ir-program] which has "rust-like +things", chalk_ir defines [`ProgramEnvironment`] which which is "pure logic". +The main field in that struct is `program_clauses`, which contains the +[`ProgramClause`]s generated by the rules module. + +#### Rules + +The `rules` module ([source code][rules-src]) defines the logic rules we use +for each item in the Rust IR. It works by iterating over every trait, impl, +etc. and emitting the rules that come from each one. + +*See also: [Lowering Rules][lowering-rules]* + +#### Well-formedness checks + +As part of lowering to logic, we also do some "well formedness" checks. See +the [`rules::wf` source code][rules-wf-src] for where those are done. + +*See also: [Well-formedness checking][wf-checking]* + +#### Coherence + +The function `record_specialization_priorities` in the `coherence` module +([source code][coherence-src]) checks "coherence", which means that it +ensures that two impls of the same trait for the same type cannot exist. + +### Solver ([chalk_solve]) + +Finally, when we've collected all the program clauses we care about, we want +to perform queries on it. The component that finds the answer to these +queries is called the *solver*. + +*See also: [The SLG Solver][slg]* + +## Crates + +Chalk's functionality is broken up into the following crates: +- [**chalk_engine**][chalk_engine]: Defines the core [SLG solver][slg]. +- [**chalk_ir**][chalk_ir]: Defines chalk's internal representation of + types, lifetimes, and goals. +- [**chalk_solve**][chalk_solve]: Combines `chalk_ir` and `chalk_engine`, + effectively. + - [`chalk_engine::context`][engine-context] provides the necessary hooks. +- [**chalk_parse**][chalk_parse]: Defines the raw AST and a parser. +- [**chalk**][doc-chalk]: Brings everything together. Defines the following + modules: + - [`rust_ir`][rust_ir], containing the "HIR-like" form of the AST + - `rust_ir::lowering`, which converts AST to `rust_ir` + - `rules`, which implements logic rules converting `rust_ir` to `chalk_ir` + - `coherence`, which implements coherence rules + - Also includes [chalki][chalki], chalk's REPL. + +[Browse source code on GitHub](https://github.com/rust-lang-nursery/chalk) + +## Testing + +chalk has a test framework for lowering programs to logic, checking the +lowered logic, and performing queries on it. This is how we test the +implementation of chalk itself, and the viability of the [lowering +rules][lowering-rules]. + +The main kind of tests in chalk are **goal tests**. They contain a program, +which is expected to lower to logic successfully, and a set of queries +(goals) along with the expected output. Here's an +[example][chalk-test-example]. Since chalk's output can be quite long, goal +tests support specifying only a prefix of the output. + +**Lowering tests** check the stages that occur before we can issue queries +to the solver: the [lowering to rust_ir][chalk-test-lowering], and the +[well-formedness checks][chalk-test-wf] that occur after that. + +### Testing internals + +Goal tests use a [`test!` macro][test-macro] that takes chalk's Rust-like +syntax and runs it through the full pipeline described above. The macro +ultimately calls the [`solve_goal` function][solve_goal]. + +Likewise, lowering tests use the [`lowering_success!` and +`lowering_error!` macros][test-lowering-macros]. 
+ +## More Resources + +* [Chalk Source Code](https://github.com/rust-lang-nursery/chalk) +* [Chalk Glossary](https://github.com/rust-lang-nursery/chalk/blob/master/GLOSSARY.md) + +### Blog Posts + +* [Lowering Rust traits to logic](http://smallcultfollowing.com/babysteps/blog/2017/01/26/lowering-rust-traits-to-logic/) +* [Unification in Chalk, part 1](http://smallcultfollowing.com/babysteps/blog/2017/03/25/unification-in-chalk-part-1/) +* [Unification in Chalk, part 2](http://smallcultfollowing.com/babysteps/blog/2017/04/23/unification-in-chalk-part-2/) +* [Negative reasoning in Chalk](http://aturon.github.io/blog/2017/04/24/negative-chalk/) +* [Query structure in chalk](http://smallcultfollowing.com/babysteps/blog/2017/05/25/query-structure-in-chalk/) +* [Cyclic queries in chalk](http://smallcultfollowing.com/babysteps/blog/2017/09/12/tabling-handling-cyclic-queries-in-chalk/) +* [An on-demand SLG solver for chalk](http://smallcultfollowing.com/babysteps/blog/2018/01/31/an-on-demand-slg-solver-for-chalk/) + +[goals-and-clauses]: ./goals-and-clauses.html +[HIR]: ../hir.html +[lowering-forall]: ./lowering-to-logic.html#type-checking-generic-functions-beyond-horn-clauses +[lowering-rules]: ./lowering-rules.html +[lowering-to-logic]: ./lowering-to-logic.html +[slg]: ./slg.html +[wf-checking]: ./wf.html + +[ast]: https://en.wikipedia.org/wiki/Abstract_syntax_tree +[chalk]: https://github.com/rust-lang-nursery/chalk +[rustc-issues]: https://github.com/rust-lang-nursery/rustc-guide/issues +[universal quantification]: https://en.wikipedia.org/wiki/Universal_quantification + +[`ProgramClause`]: https://rust-lang-nursery.github.io/chalk/doc/chalk_ir/enum.ProgramClause.html +[`ProgramEnvironment`]: https://rust-lang-nursery.github.io/chalk/doc/chalk_ir/struct.ProgramEnvironment.html +[chalk_engine]: https://rust-lang-nursery.github.io/chalk/doc/chalk_engine/index.html +[chalk_ir]: https://rust-lang-nursery.github.io/chalk/doc/chalk_ir/index.html +[chalk_parse]: https://rust-lang-nursery.github.io/chalk/doc/chalk_parse/index.html +[chalk_solve]: https://rust-lang-nursery.github.io/chalk/doc/chalk_solve/index.html +[doc-chalk]: https://rust-lang-nursery.github.io/chalk/doc/chalk/index.html +[engine-context]: https://rust-lang-nursery.github.io/chalk/doc/chalk_engine/context/index.html +[rust_ir-program]: https://rust-lang-nursery.github.io/chalk/doc/chalk/rust_ir/struct.Program.html +[rust_ir]: https://rust-lang-nursery.github.io/chalk/doc/chalk/rust_ir/index.html + +[binders-struct]: https://github.com/rust-lang-nursery/chalk/blob/94a1941a021842a5fcb35cd043145c8faae59f08/src/ir.rs#L661 +[chalk-ast]: https://github.com/rust-lang-nursery/chalk/blob/master/chalk-parse/src/ast.rs +[chalk-test-example]: https://github.com/rust-lang-nursery/chalk/blob/4bce000801de31bf45c02f742a5fce335c9f035f/src/test.rs#L115 +[chalk-test-lowering-example]: https://github.com/rust-lang-nursery/chalk/blob/4bce000801de31bf45c02f742a5fce335c9f035f/src/rust_ir/lowering/test.rs#L8-L31 +[chalk-test-lowering]: https://github.com/rust-lang-nursery/chalk/blob/4bce000801de31bf45c02f742a5fce335c9f035f/src/rust_ir/lowering/test.rs +[chalk-test-wf]: https://github.com/rust-lang-nursery/chalk/blob/4bce000801de31bf45c02f742a5fce335c9f035f/src/rules/wf/test.rs#L1 +[chalki]: https://rust-lang-nursery.github.io/chalk/doc/chalki/index.html +[clause]: https://github.com/rust-lang-nursery/chalk/blob/master/GLOSSARY.md#clause +[coherence-src]: https://github.com/rust-lang-nursery/chalk/blob/master/src/coherence.rs +[ir-code]: 
https://github.com/rust-lang-nursery/chalk/blob/master/src/rust_ir.rs +[rules-environment]: https://github.com/rust-lang-nursery/chalk/blob/94a1941a021842a5fcb35cd043145c8faae59f08/src/rules.rs#L9 +[rules-src]: https://github.com/rust-lang-nursery/chalk/blob/4bce000801de31bf45c02f742a5fce335c9f035f/src/rules.rs +[rules-wf-src]: https://github.com/rust-lang-nursery/chalk/blob/4bce000801de31bf45c02f742a5fce335c9f035f/src/rules/wf.rs +[solve_goal]: https://github.com/rust-lang-nursery/chalk/blob/4bce000801de31bf45c02f742a5fce335c9f035f/src/test.rs#L85 +[test-lowering-macros]: https://github.com/rust-lang-nursery/chalk/blob/4bce000801de31bf45c02f742a5fce335c9f035f/src/test_util.rs#L21-L54 +[test-macro]: https://github.com/rust-lang-nursery/chalk/blob/4bce000801de31bf45c02f742a5fce335c9f035f/src/test.rs#L33 diff --git a/src/doc/rustc-guide/src/traits/goals-and-clauses.md b/src/doc/rustc-guide/src/traits/goals-and-clauses.md new file mode 100644 index 0000000000..8f1ffa4882 --- /dev/null +++ b/src/doc/rustc-guide/src/traits/goals-and-clauses.md @@ -0,0 +1,270 @@ +# Goals and clauses + +In logic programming terms, a **goal** is something that you must +prove and a **clause** is something that you know is true. As +described in the [lowering to logic](./lowering-to-logic.html) +chapter, Rust's trait solver is based on an extension of hereditary +harrop (HH) clauses, which extend traditional Prolog Horn clauses with +a few new superpowers. + +## Goals and clauses meta structure + +In Rust's solver, **goals** and **clauses** have the following forms +(note that the two definitions reference one another): + +```text +Goal = DomainGoal // defined in the section below + | Goal && Goal + | Goal || Goal + | exists { Goal } // existential quantification + | forall { Goal } // universal quantification + | if (Clause) { Goal } // implication + | true // something that's trivially true + | ambiguous // something that's never provable + +Clause = DomainGoal + | Clause :- Goal // if can prove Goal, then Clause is true + | Clause && Clause + | forall { Clause } + +K = // a "kind" + | +``` + +The proof procedure for these sorts of goals is actually quite +straightforward. Essentially, it's a form of depth-first search. The +paper +["A Proof Procedure for the Logic of Hereditary Harrop Formulas"][pphhf] +gives the details. + +In terms of code, these types are defined in +[`librustc/traits/mod.rs`][traits_mod] in rustc, and in +[`chalk-ir/src/lib.rs`][chalk_ir] in chalk. + +[pphhf]: ./bibliography.html#pphhf +[traits_mod]: https://github.com/rust-lang/rust/blob/master/src/librustc/traits/mod.rs +[chalk_ir]: https://github.com/rust-lang-nursery/chalk/blob/master/chalk-ir/src/lib.rs + + + +## Domain goals + +*Domain goals* are the atoms of the trait logic. As can be seen in the +definitions given above, general goals basically consist in a combination of +domain goals. + +Moreover, flattenning a bit the definition of clauses given previously, one can +see that clauses are always of the form: +```text +forall { DomainGoal :- Goal } +``` +hence domain goals are in fact clauses' LHS. That is, at the most granular level, +domain goals are what the trait solver will end up trying to prove. + + + +To define the set of domain goals in our system, we need to first +introduce a few simple formulations. A **trait reference** consists of +the name of a trait along with a suitable set of inputs P0..Pn: + +```text +TraitRef = P0: TraitName +``` + +So, for example, `u32: Display` is a trait reference, as is `Vec: +IntoIterator`. 
Note that Rust surface syntax also permits some extra +things, like associated type bindings (`Vec: IntoIterator`), that are not part of a trait reference. + + + +A **projection** consists of an associated item reference along with +its inputs P0..Pm: + +```text +Projection = >::AssocItem +``` + +Given these, we can define a `DomainGoal` as follows: + +```text +DomainGoal = Holds(WhereClause) + | FromEnv(TraitRef) + | FromEnv(Type) + | WellFormed(TraitRef) + | WellFormed(Type) + | Normalize(Projection -> Type) + +WhereClause = Implemented(TraitRef) + | ProjectionEq(Projection = Type) + | Outlives(Type: Region) + | Outlives(Region: Region) +``` + +`WhereClause` refers to a `where` clause that a Rust user would actually be able +to write in a Rust program. This abstraction exists only as a convenience as we +sometimes want to only deal with domain goals that are effectively writable in +Rust. + +Let's break down each one of these, one-by-one. + +#### Implemented(TraitRef) +e.g. `Implemented(i32: Copy)` + +True if the given trait is implemented for the given input types and lifetimes. + +#### ProjectionEq(Projection = Type) +e.g. `ProjectionEq::Item = u8` + +The given associated type `Projection` is equal to `Type`; this can be proved +with either normalization or using placeholder associated types. See +[the section on associated types](./associated-types.html). + +#### Normalize(Projection -> Type) +e.g. `ProjectionEq::Item -> u8` + +The given associated type `Projection` can be [normalized][n] to `Type`. + +As discussed in [the section on associated +types](./associated-types.html), `Normalize` implies `ProjectionEq`, +but not vice versa. In general, proving `Normalize(::Item -> U)` +also requires proving `Implemented(T: Trait)`. + +[n]: ./associated-types.html#normalize +[at]: ./associated-types.html + +#### FromEnv(TraitRef) +e.g. `FromEnv(Self: Add)` + +True if the inner `TraitRef` is *assumed* to be true, +that is, if it can be derived from the in-scope where clauses. + +For example, given the following function: + +```rust +fn loud_clone(stuff: &T) -> T { + println!("cloning!"); + stuff.clone() +} +``` + +Inside the body of our function, we would have `FromEnv(T: Clone)`. In-scope +where clauses nest, so a function body inside an impl body inherits the +impl body's where clauses, too. + +This and the next rule are used to implement [implied bounds]. As we'll see +in the section on lowering, `FromEnv(TraitRef)` implies `Implemented(TraitRef)`, +but not vice versa. This distinction is crucial to implied bounds. + +#### FromEnv(Type) +e.g. `FromEnv(HashSet)` + +True if the inner `Type` is *assumed* to be well-formed, that is, if it is an +input type of a function or an impl. + +For example, given the following code: + +```rust,ignore +struct HashSet where K: Hash { ... } + +fn loud_insert(set: &mut HashSet, item: K) { + println!("inserting!"); + set.insert(item); +} +``` + +`HashSet` is an input type of the `loud_insert` function. Hence, we assume it +to be well-formed, so we would have `FromEnv(HashSet)` inside the body of our +function. As we'll see in the section on lowering, `FromEnv(HashSet)` implies +`Implemented(K: Hash)` because the +`HashSet` declaration was written with a `K: Hash` where clause. Hence, we don't +need to repeat that bound on the `loud_insert` function: we rather automatically +assume that it is true. + +#### WellFormed(Item) +These goals imply that the given item is *well-formed*. 
+ +We can talk about different types of items being well-formed: + +* *Types*, like `WellFormed(Vec)`, which is true in Rust, or + `WellFormed(Vec)`, which is not (because `str` is not `Sized`.) + +* *TraitRefs*, like `WellFormed(Vec: Clone)`. + +Well-formedness is important to [implied bounds]. In particular, the reason +it is okay to assume `FromEnv(T: Clone)` in the `loud_clone` example is that we +_also_ verify `WellFormed(T: Clone)` for each call site of `loud_clone`. +Similarly, it is okay to assume `FromEnv(HashSet)` in the `loud_insert` +example because we will verify `WellFormed(HashSet)` for each call site of +`loud_insert`. + +#### Outlives(Type: Region), Outlives(Region: Region) +e.g. `Outlives(&'a str: 'b)`, `Outlives('a: 'static)` + +True if the given type or region on the left outlives the right-hand region. + + + +## Coinductive goals + +Most goals in our system are "inductive". In an inductive goal, +circular reasoning is disallowed. Consider this example clause: + +```text + Implemented(Foo: Bar) :- + Implemented(Foo: Bar). +``` + +Considered inductively, this clause is useless: if we are trying to +prove `Implemented(Foo: Bar)`, we would then recursively have to prove +`Implemented(Foo: Bar)`, and that cycle would continue ad infinitum +(the trait solver will terminate here, it would just consider that +`Implemented(Foo: Bar)` is not known to be true). + +However, some goals are *co-inductive*. Simply put, this means that +cycles are OK. So, if `Bar` were a co-inductive trait, then the rule +above would be perfectly valid, and it would indicate that +`Implemented(Foo: Bar)` is true. + +*Auto traits* are one example in Rust where co-inductive goals are used. +Consider the `Send` trait, and imagine that we have this struct: + +```rust +struct Foo { + next: Option> +} +``` + +The default rules for auto traits say that `Foo` is `Send` if the +types of its fields are `Send`. Therefore, we would have a rule like + +```text +Implemented(Foo: Send) :- + Implemented(Option>: Send). +``` + +As you can probably imagine, proving that `Option>: Send` is +going to wind up circularly requiring us to prove that `Foo: Send` +again. So this would be an example where we wind up in a cycle – but +that's ok, we *do* consider `Foo: Send` to hold, even though it +references itself. + +In general, co-inductive traits are used in Rust trait solving when we +want to enumerate a fixed set of possibilities. In the case of auto +traits, we are enumerating the set of reachable types from a given +starting point (i.e., `Foo` can reach values of type +`Option>`, which implies it can reach values of type +`Box`, and then of type `Foo`, and then the cycle is complete). + +In addition to auto traits, `WellFormed` predicates are co-inductive. +These are used to achieve a similar "enumerate all the cases" pattern, +as described in the section on [implied bounds]. + +[implied bounds]: ./lowering-rules.html#implied-bounds + +## Incomplete chapter + +Some topics yet to be written: + +- Elaborate on the proof procedure +- SLG solving – introduce negative reasoning diff --git a/src/doc/rustc-guide/src/traits/hrtb.md b/src/doc/rustc-guide/src/traits/hrtb.md new file mode 100644 index 0000000000..8b3a9f649b --- /dev/null +++ b/src/doc/rustc-guide/src/traits/hrtb.md @@ -0,0 +1,127 @@ +# Higher-ranked trait bounds + +One of the more subtle concepts in trait resolution is *higher-ranked trait +bounds*. An example of such a bound is `for<'a> MyTrait<&'a isize>`. 
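+
+For example, such a bound might be used like the following sketch; the `MyTrait`, `Logger`, and `use_it` names are invented for illustration:
+
+```rust
+trait MyTrait<T> {
+    fn observe(&self, value: T);
+}
+
+struct Logger;
+
+impl<'a> MyTrait<&'a isize> for Logger {
+    fn observe(&self, value: &'a isize) {
+        println!("saw {}", value);
+    }
+}
+
+// The `for<'a>` bound says: `M` must implement `MyTrait<&'a isize>`
+// for *every* lifetime `'a`, not just one particular lifetime.
+fn use_it<M: for<'a> MyTrait<&'a isize>>(m: M) {
+    let x = 5;
+    m.observe(&x); // `&x` has some short, caller-chosen lifetime
+}
+
+fn main() {
+    use_it(Logger);
+}
+```
+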
+Let's walk through how selection on higher-ranked trait references +works. + +## Basic matching and placeholder leaks + +Suppose we have a trait `Foo`: + +```rust +trait Foo { + fn foo(&self, x: X) { } +} +``` + +Let's say we have a function `want_hrtb` that wants a type which +implements `Foo<&'a isize>` for any `'a`: + +```rust,ignore +fn want_hrtb() where T : for<'a> Foo<&'a isize> { ... } +``` + +Now we have a struct `AnyInt` that implements `Foo<&'a isize>` for any +`'a`: + +```rust,ignore +struct AnyInt; +impl<'a> Foo<&'a isize> for AnyInt { } +``` + +And the question is, does `AnyInt : for<'a> Foo<&'a isize>`? We want the +answer to be yes. The algorithm for figuring it out is closely related +to the subtyping for higher-ranked types (which is described [here][hrsubtype] +and also in a [paper by SPJ]. If you wish to understand higher-ranked +subtyping, we recommend you read the paper). There are a few parts: + +1. Replace bound regions in the obligation with placeholders. +2. Match the impl against the [placeholder] obligation. +3. Check for _placeholder leaks_. + +[placeholder]: ../appendix/glossary.html#appendix-c-glossary +[hrsubtype]: https://github.com/rust-lang/rust/tree/master/src/librustc/infer/higher_ranked/README.md +[paper by SPJ]: http://research.microsoft.com/en-us/um/people/simonpj/papers/higher-rank/ + +So let's work through our example. + +1. The first thing we would do is to +replace the bound region in the obligation with a placeholder, yielding +`AnyInt : Foo<&'0 isize>` (here `'0` represents placeholder region #0). +Note that we now have no quantifiers; +in terms of the compiler type, this changes from a `ty::PolyTraitRef` +to a `TraitRef`. We would then create the `TraitRef` from the impl, +using fresh variables for it's bound regions (and thus getting +`Foo<&'$a isize>`, where `'$a` is the inference variable for `'a`). + +2. Next +we relate the two trait refs, yielding a graph with the constraint +that `'0 == '$a`. + +3. Finally, we check for placeholder "leaks" – a +leak is basically any attempt to relate a placeholder region to another +placeholder region, or to any region that pre-existed the impl match. +The leak check is done by searching from the placeholder region to find +the set of regions that it is related to in any way. This is called +the "taint" set. To pass the check, that set must consist *solely* of +itself and region variables from the impl. If the taint set includes +any other region, then the match is a failure. In this case, the taint +set for `'0` is `{'0, '$a}`, and hence the check will succeed. + +Let's consider a failure case. Imagine we also have a struct + +```rust,ignore +struct StaticInt; +impl Foo<&'static isize> for StaticInt; +``` + +We want the obligation `StaticInt : for<'a> Foo<&'a isize>` to be +considered unsatisfied. The check begins just as before. `'a` is +replaced with a placeholder `'0` and the impl trait reference is instantiated to +`Foo<&'static isize>`. When we relate those two, we get a constraint +like `'static == '0`. This means that the taint set for `'0` is `{'0, +'static}`, which fails the leak check. + +**TODO**: This is because `'static` is not a region variable but is in the +taint set, right? + +## Higher-ranked trait obligations + +Once the basic matching is done, we get to another interesting topic: +how to deal with impl obligations. I'll work through a simple example +here. 
Imagine we have the traits `Foo` and `Bar` and an associated impl: + +```rust +trait Foo { + fn foo(&self, x: X) { } +} + +trait Bar { + fn bar(&self, x: X) { } +} + +impl Foo for F + where F : Bar +{ +} +``` + +Now let's say we have a obligation `Baz: for<'a> Foo<&'a isize>` and we match +this impl. What obligation is generated as a result? We want to get +`Baz: for<'a> Bar<&'a isize>`, but how does that happen? + +After the matching, we are in a position where we have a placeholder +substitution like `X => &'0 isize`. If we apply this substitution to the +impl obligations, we get `F : Bar<&'0 isize>`. Obviously this is not +directly usable because the placeholder region `'0` cannot leak out of +our computation. + +What we do is to create an inverse mapping from the taint set of `'0` +back to the original bound region (`'a`, here) that `'0` resulted +from. (This is done in `higher_ranked::plug_leaks`). We know that the +leak check passed, so this taint set consists solely of the placeholder +region itself plus various intermediate region variables. We then walk +the trait-reference and convert every region in that taint set back to +a late-bound region, so in this case we'd wind up with +`Baz: for<'a> Bar<&'a isize>`. diff --git a/src/doc/rustc-guide/src/traits/implied-bounds.md b/src/doc/rustc-guide/src/traits/implied-bounds.md new file mode 100644 index 0000000000..f32c9d0cbb --- /dev/null +++ b/src/doc/rustc-guide/src/traits/implied-bounds.md @@ -0,0 +1,502 @@ +# Implied Bounds + +Implied bounds remove the need to repeat where clauses written on +a type declaration or a trait declaration. For example, say we have the +following type declaration: +```rust,ignore +struct HashSet { + ... +} +``` + +then everywhere we use `HashSet` as an "input" type, that is appearing in +the receiver type of an `impl` or in the arguments of a function, we don't +want to have to repeat the `where K: Hash` bound, as in: + +```rust,ignore +// I don't want to have to repeat `where K: Hash` here. +impl HashSet { + ... +} + +// Same here. +fn loud_insert(set: &mut HashSet, item: K) { + println!("inserting!"); + set.insert(item); +} +``` + +Note that in the `loud_insert` example, `HashSet` is not the type +of the `set` argument of `loud_insert`, it only *appears* in the +argument type `&mut HashSet`: we care about every type appearing +in the function's header (the header is the signature without the return type), +not only types of the function's arguments. + +The rationale for applying implied bounds to input types is that, for example, +in order to call the `loud_insert` function above, the programmer must have +*produced* the type `HashSet` already, hence the compiler already verified +that `HashSet` was well-formed, i.e. that `K` effectively implemented +`Hash`, as in the following example: + +```rust,ignore +fn main() { + // I am producing a value of type `HashSet`. + // If `i32` was not `Hash`, the compiler would report an error here. + let set: HashSet = HashSet::new(); + loud_insert(&mut set, 5); +} +``` + +Hence, we don't want to repeat where clauses for input types because that would +sort of duplicate the work of the programmer, having to verify that their types +are well-formed both when calling the function and when using them in the +arguments of their function. The same reasoning applies when using an `impl`. + +Similarly, given the following trait declaration: +```rust,ignore +trait Copy where Self: Clone { // desugared version of `Copy: Clone` + ... 
+} +``` + +then everywhere we bound over `SomeType: Copy`, we would like to be able to +use the fact that `SomeType: Clone` without having to write it explicitly, +as in: +```rust,ignore +fn loud_clone(x: T) { + println!("cloning!"); + x.clone(); +} + +fn fun_with_copy(x: T) { + println!("will clone a `Copy` type soon..."); + + // I'm using `loud_clone` with `T: Copy`, I know this + // implies `T: Clone` so I don't want to have to write it explicitly. + loud_clone(x); +} +``` + +The rationale for implied bounds for traits is that if a type implements +`Copy`, that is, if there exists an `impl Copy` for that type, there *ought* +to exist an `impl Clone` for that type, otherwise the compiler would have +reported an error in the first place. So again, if we were forced to repeat the +additionnal `where SomeType: Clone` everywhere whereas we already know that +`SomeType: Copy` hold, we would kind of duplicate the verification work. + +Implied bounds are not yet completely enforced in rustc, at the moment it only +works for outlive requirements, super trait bounds, and bounds on associated +types. The full RFC can be found [here][RFC]. We'll give here a brief view +of how implied bounds work and why we chose to implement it that way. The +complete set of lowering rules can be found in the corresponding +[chapter](./lowering-rules.md). + +[RFC]: https://github.com/rust-lang/rfcs/blob/master/text/2089-implied-bounds.md + +## Implied bounds and lowering rules + +Now we need to express implied bounds in terms of logical rules. We will start +with exposing a naive way to do it. Suppose that we have the following traits: +```rust,ignore +trait Foo { + ... +} + +trait Bar where Self: Foo { } { + ... +} +``` + +So we would like to say that if a type implements `Bar`, then necessarily +it must also implement `Foo`. We might think that a clause like this would +work: +```text +forall { + Implemented(Type: Foo) :- Implemented(Type: Bar). +} +``` + +Now suppose that we just write this impl: +```rust,ignore +struct X; + +impl Bar for X { } +``` + +Clearly this should not be allowed: indeed, we wrote a `Bar` impl for `X`, but +the `Bar` trait requires that we also implement `Foo` for `X`, which we never +did. In terms of what the compiler does, this would look like this: +```rust,ignore +struct X; + +impl Bar for X { + // We are in a `Bar` impl for the type `X`. + // There is a `where Self: Foo` bound on the `Bar` trait declaration. + // Hence I need to prove that `X` also implements `Foo` for that impl + // to be legal. +} +``` +So the compiler would try to prove `Implemented(X: Foo)`. Of course it will +not find any `impl Foo for X` since we did not write any. However, it +will see our implied bound clause: +```text +forall { + Implemented(Type: Foo) :- Implemented(Type: Bar). +} +``` + +so that it may be able to prove `Implemented(X: Foo)` if `Implemented(X: Bar)` +holds. And it turns out that `Implemented(X: Bar)` does hold since we wrote +a `Bar` impl for `X`! Hence the compiler will accept the `Bar` impl while it +should not. + +## Implied bounds coming from the environment + +So the naive approach does not work. What we need to do is to somehow decouple +implied bounds from impls. Suppose we know that a type `SomeType<...>` +implements `Bar` and we want to deduce that `SomeType<...>` must also implement +`Foo`. 
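+
+Concretely, the situation looks something like the following sketch. The `SomeType`, `wants_foo`, and `concludes_foo` names are our own, and the sketch leans on the supertrait flavor of implied bounds that rustc already supports:
+
+```rust
+trait Foo { }
+trait Bar: Foo { } // i.e. `trait Bar where Self: Foo`
+
+struct SomeType<T>(T);
+
+impl<T> Foo for SomeType<T> { }
+impl<T> Bar for SomeType<T> { }
+
+fn wants_foo<U: Foo>(_: U) { }
+
+// The environment tells us that `SomeType<T>: Bar`; implied bounds are what
+// let us also use `SomeType<T>: Foo` here without writing it down.
+fn concludes_foo<T>(x: SomeType<T>)
+where
+    SomeType<T>: Bar,
+{
+    wants_foo(x);
+}
+
+fn main() {
+    concludes_foo(SomeType(22));
+}
+```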
+ +There are two possibilities: first, we have enough information about +`SomeType<...>` to see that there exists a `Bar` impl in the program which +covers `SomeType<...>`, for example a plain `impl<...> Bar for SomeType<...>`. +Then if the compiler has done its job correctly, there *must* exist a `Foo` +impl which covers `SomeType<...>`, e.g. another plain +`impl<...> Foo for SomeType<...>`. In that case then, we can just use this +impl and we do not need implied bounds at all. + +Second possibility: we do not know enough about `SomeType<...>` in order to +find a `Bar` impl which covers it, for example if `SomeType<...>` is just +a type parameter in a function: +```rust,ignore +fn foo() { + // We'd like to deduce `Implemented(T: Foo)`. +} +``` + +That is, the information that `T` implements `Bar` here comes from the +*environment*. The environment is the set of things that we assume to be true +when we type check some Rust declaration. In that case, what we assume is that +`T: Bar`. Then at that point, we might authorize ourselves to have some kind +of "local" implied bound reasoning which would say +`Implemented(T: Foo) :- Implemented(T: Bar)`. This reasoning would +only be done within our `foo` function in order to avoid the earlier +problem where we had a global clause. + +We can apply these local reasonings everywhere we can have an environment +-- i.e. when we can write where clauses -- that is, inside impls, +trait declarations, and type declarations. + +## Computing implied bounds with `FromEnv` + +The previous subsection showed that it was only useful to compute implied +bounds for facts coming from the environment. +We talked about "local" rules, but there are multiple possible strategies to +indeed implement the locality of implied bounds. + +In rustc, the current strategy is to *elaborate* bounds: that is, each time +we have a fact in the environment, we recursively derive all the other things +that are implied by this fact until we reach a fixed point. For example, if +we have the following declarations: +```rust,ignore +trait A { } +trait B where Self: A { } +trait C where Self: B { } + +fn foo() { + ... +} +``` +then inside the `foo` function, we start with an environment containing only +`Implemented(T: C)`. Then because of implied bounds for the `C` trait, we +elaborate `Implemented(T: B)` and add it to our environment. Because of +implied bounds for the `B` trait, we elaborate `Implemented(T: A)`and add it +to our environment as well. We cannot elaborate anything else, so we conclude +that our final environment consists of `Implemented(T: A + B + C)`. + +In the new-style trait system, we like to encode as many things as possible +with logical rules. So rather than "elaborating", we have a set of *global* +program clauses defined like so: +```text +forall { Implemented(T: A) :- FromEnv(T: A). } + +forall { Implemented(T: B) :- FromEnv(T: B). } +forall { FromEnv(T: A) :- FromEnv(T: B). } + +forall { Implemented(T: C) :- FromEnv(T: C). } +forall { FromEnv(T: C) :- FromEnv(T: C). } +``` +So these clauses are defined globally (that is, they are available from +everywhere in the program) but they cannot be used because the hypothesis +is always of the form `FromEnv(...)` which is a bit special. Indeed, as +indicated by the name, `FromEnv(...)` facts can **only** come from the +environment. +How it works is that in the `foo` function, instead of having an environment +containing `Implemented(T: C)`, we replace this environment with +`FromEnv(T: C)`. 
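+As an aside, the "recursively derive until we reach a fixed point" strategy described above can be pictured as a tiny toy computation over strings. This is purely illustrative and is in no way rustc's actual elaboration code:
+
+```rust
+use std::collections::{HashMap, HashSet};
+
+// Toy elaboration: starting from the facts in the environment, repeatedly add
+// every direct superbound until nothing new appears (a fixed point).
+fn elaborate(env: &[&str], superbounds: &HashMap<&str, Vec<&str>>) -> HashSet<String> {
+    let mut facts: HashSet<String> = env.iter().map(|s| s.to_string()).collect();
+    loop {
+        let mut new_facts = Vec::new();
+        for fact in &facts {
+            if let Some(supers) = superbounds.get(fact.as_str()) {
+                for s in supers {
+                    if !facts.contains(*s) {
+                        new_facts.push((*s).to_string());
+                    }
+                }
+            }
+        }
+        if new_facts.is_empty() {
+            break;
+        }
+        facts.extend(new_facts);
+    }
+    facts
+}
+
+fn main() {
+    // `trait A {}`, `trait B: A {}`, `trait C: B {}` from the example above.
+    let mut superbounds = HashMap::new();
+    superbounds.insert("T: B", vec!["T: A"]);
+    superbounds.insert("T: C", vec!["T: B"]);
+    // Environment of `fn foo<T: C>()`: it starts with only `T: C`.
+    let env = elaborate(&["T: C"], &superbounds);
+    assert!(env.contains("T: A") && env.contains("T: B") && env.contains("T: C"));
+}
+```
+
+With that aside done, let's return to the `FromEnv` encoding.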
From here and thanks to the above clauses, we see that we +are able to reach any of `Implemented(T: A)`, `Implemented(T: B)` or +`Implemented(T: C)`, which is what we wanted. + +## Implied bounds and well-formedness checking + +Implied bounds are tightly related with well-formedness checking. +Well-formedness checking is the process of checking that the impls the +programmer wrote are legal, what we referred to earlier as "the compiler doing +its job correctly". + +We already saw examples of illegal and legal impls: +```rust,ignore +trait Foo { } +trait Bar where Self: Foo { } + +struct X; +struct Y; + +impl Bar for X { + // This impl is not legal: the `Bar` trait requires that we also + // implement `Foo`, and we didn't. +} + +impl Foo for Y { + // This impl is legal: there is nothing to check as there are no where + // clauses on the `Foo` trait. +} + +impl Bar for Y { + // This impl is legal: we have a `Foo` impl for `Y`. +} +``` +We must define what "legal" and "illegal" mean. For this, we introduce another +predicate: `WellFormed(Type: Trait)`. We say that the trait reference +`Type: Trait` is well-formed if `Type` meets the bounds written on the +`Trait` declaration. For each impl we write, assuming that the where clauses +declared on the impl hold, the compiler tries to prove that the corresponding +trait reference is well-formed. The impl is legal if the compiler manages to do +so. + +Coming to the definition of `WellFormed(Type: Trait)`, it would be tempting +to define it as: +```rust,ignore +trait Trait where WC1, WC2, ..., WCn { + ... +} +``` + +```text +forall { + WellFormed(Type: Trait) :- WC1 && WC2 && .. && WCn. +} +``` +and indeed this was basically what was done in rustc until it was noticed that +this mixed badly with implied bounds. The key thing is that implied bounds +allows someone to derive all bounds implied by a fact in the environment, and +this *transitively* as we've seen with the `A + B + C` traits example. +However, the `WellFormed` predicate as defined above only checks that the +*direct* superbounds hold. That is, if we come back to our `A + B + C` +example: +```rust,ignore +trait A { } +// No where clauses, always well-formed. +// forall { WellFormed(Type: A). } + +trait B where Self: A { } +// We only check the direct superbound `Self: A`. +// forall { WellFormed(Type: B) :- Implemented(Type: A). } + +trait C where Self: B { } +// We only check the direct superbound `Self: B`. We do not check +// the `Self: A` implied bound coming from the `Self: B` superbound. +// forall { WellFormed(Type: C) :- Implemented(Type: B). } +``` +There is an asymmetry between the recursive power of implied bounds and +the shallow checking of `WellFormed`. It turns out that this asymmetry +can be [exploited][bug]. Indeed, suppose that we define the following +traits: +```rust,ignore +trait Partial where Self: Copy { } +// WellFormed(Self: Partial) :- Implemented(Self: Copy). + +trait Complete where Self: Partial { } +// WellFormed(Self: Complete) :- Implemented(Self: Partial). + +impl Partial for T where T: Complete { } + +impl Complete for T { } +``` + +For the `Partial` impl, what the compiler must prove is: +```text +forall { + if (T: Complete) { // assume that the where clauses hold + WellFormed(T: Partial) // show that the trait reference is well-formed + } +} +``` +Proving `WellFormed(T: Partial)` amounts to proving `Implemented(T: Copy)`. +However, we have `Implemented(T: Complete)` in our environment: thanks to +implied bounds, we can deduce `Implemented(T: Partial)`. 
Using implied bounds +one level deeper, we can deduce `Implemented(T: Copy)`. Finally, the `Partial` +impl is legal. + +For the `Complete` impl, what the compiler must prove is: +```text +forall { + WellFormed(T: Complete) // show that the trait reference is well-formed +} +``` +Proving `WellFormed(T: Complete)` amounts to proving `Implemented(T: Partial)`. +We see that the `impl Partial for T` applies if we can prove +`Implemented(T: Complete)`, and it turns out we can prove this fact since our +`impl Complete for T` is a blanket impl without any where clauses. + +So both impls are legal and the compiler accepts the program. Moreover, thanks +to the `Complete` blanket impl, all types implement `Complete`. So we could +now use this impl like so: +```rust,ignore +fn eat(x: T) { } + +fn copy_everything(x: T) { + eat(x); + eat(x); +} + +fn main() { + let not_copiable = vec![1, 2, 3, 4]; + copy_everything(not_copiable); +} +``` +In this program, we use the fact that `Vec` implements `Complete`, as any +other type. Hence we can call `copy_everything` with an argument of type +`Vec`. Inside the `copy_everything` function, we have the +`Implemented(T: Complete)` bound in our environment. Thanks to implied bounds, +we can deduce `Implemented(T: Partial)`. Using implied bounds again, we deduce +`Implemented(T: Copy)` and we can indeed call the `eat` function which moves +the argument twice since its argument is `Copy`. Problem: the `T` type was +in fact `Vec` which is not copy at all, hence we will double-free the +underlying vec storage so we have a memory unsoundness in safe Rust. + +Of course, disregarding the asymmetry between `WellFormed` and implied bounds, +this bug was possible only because we had some kind of self-referencing impls. +But self-referencing impls are very useful in practice and are not the real +culprits in this affair. + +[bug]: https://github.com/rust-lang/rust/pull/43786 + +## Co-inductiveness of `WellFormed` + +So the solution is to fix this asymmetry between `WellFormed` and implied +bounds. For that, we need for the `WellFormed` predicate to not only require +that the direct superbounds hold, but also all the bounds transitively implied +by the superbounds. What we can do is to have the following rules for the +`WellFormed` predicate: +```rust,ignore +trait A { } +// WellFormed(Self: A) :- Implemented(Self: A). + +trait B where Self: A { } +// WellFormed(Self: B) :- Implemented(Self: B) && WellFormed(Self: A). + +trait C where Self: B { } +// WellFormed(Self: C) :- Implemented(Self: C) && WellFormed(Self: B). +``` + +Notice that we are now also requiring `Implemented(Self: Trait)` for +`WellFormed(Self: Trait)` to be true: this is to simplify the process of +traversing all the implied bounds transitively. This does not change anything +when checking whether impls are legal, because since we assume +that the where clauses hold inside the impl, we know that the corresponding +trait reference do hold. Thanks to this setup, you can see that we indeed +require to prove the set of all bounds transitively implied by the where +clauses. + +However there is still a catch. Suppose that we have the following trait +definition: +```rust,ignore +trait Foo where ::Item: Foo { + type Item; +} +``` + +so this definition is a bit more involved than the ones we've seen already +because it defines an associated item. However, the well-formedness rule +would not be more complicated: +```text +WellFormed(Self: Foo) :- + Implemented(Self: Foo) && + WellFormed(::Item: Foo). 
+``` + +Now we would like to write the following impl: +```rust,ignore +impl Foo for i32 { + type Item = i32; +} +``` +The `Foo` trait definition and the `impl Foo for i32` are perfectly valid +Rust: we're kind of recursively using our `Foo` impl in order to show that +the associated value indeed implements `Foo`, but that's ok. But if we +translate this to our well-formedness setting, the compiler proof process +inside the `Foo` impl is the following: it starts with proving that the +well-formedness goal `WellFormed(i32: Foo)` is true. In order to do that, +it must prove the following goals: `Implemented(i32: Foo)` and +`WellFormed(::Item: Foo)`. `Implemented(i32: Foo)` holds because +there is our impl and there are no where clauses on it so it's always true. +However, because of the associated type value we used, +`WellFormed(::Item: Foo)` simplifies to just +`WellFormed(i32: Foo)`. So in order to prove its original goal +`WellFormed(i32: Foo)`, the compiler needs to prove `WellFormed(i32: Foo)`: +this clearly is a cycle and cycles are usually rejected by the trait solver, +unless... if the `WellFormed` predicate was made to be co-inductive. + +A co-inductive predicate, as discussed in the chapter on +[goals and clauses](./goals-and-clauses.md#coinductive-goals), are predicates +for which the +trait solver accepts cycles. In our setting, this would be a valid thing to do: +indeed, the `WellFormed` predicate just serves as a way of enumerating all +the implied bounds. Hence, it's like a fixed point algorithm: it tries to grow +the set of implied bounds until there is nothing more to add. Here, a cycle +in the chain of `WellFormed` predicates just means that there is no more bounds +to add in that direction, so we can just accept this cycle and focus on other +directions. It's easy to prove that under these co-inductive semantics, we +are effectively visiting all the transitive implied bounds, and only these. + +## Implied bounds on types + +We mainly talked about implied bounds for traits because this was the most +subtle regarding implementation. Implied bounds on types are simpler, +especially because if we assume that a type is well-formed, we don't use that +fact to deduce that other types are well-formed, we only use it to deduce +that e.g. some trait bounds hold. + +For types, we just use rules like these ones: +```rust,ignore +struct Type<...> where WC1, ..., WCn { + ... +} +``` + +```text +forall<...> { + WellFormed(Type<...>) :- WC1, ..., WCn. +} + +forall<...> { + FromEnv(WC1) :- FromEnv(Type<...>). + ... + FromEnv(WCn) :- FromEnv(Type<...>). +} +``` +We can see that we have this asymmetry between well-formedness check, +which only verifies that the direct superbounds hold, and implied bounds which +gives access to all bounds transitively implied by the where clauses. In that +case this is ok because as we said, we don't use `FromEnv(Type<...>)` to deduce +other `FromEnv(OtherType<...>)` things, nor do we use `FromEnv(Type: Trait)` to +deduce `FromEnv(OtherType<...>)` things. So in that sense type definitions are +"less recursive" than traits, and we saw in a previous subsection that +it was the combination of asymmetry and recursive trait / impls that led to +unsoundness. As such, the `WellFormed(Type<...>)` predicate does not need +to be co-inductive. + +This asymmetry optimization is useful because in a real Rust program, we have +to check the well-formedness of types very often (e.g. for each type which +appears in the body of a function). 
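+
+Since the outlives flavor of implied bounds on input types is one of the parts rustc already enforces today, here is a small runnable illustration of our own (the `Ref` and `read` names are invented). The `T: 'a` requirement written on the struct does not have to be repeated on the function, because `Ref<'a, T>` appears in the function's header:
+
+```rust
+use std::fmt::Debug;
+
+// The struct declaration carries the requirement `T: 'a`.
+struct Ref<'a, T: 'a> {
+    inner: &'a T,
+}
+
+// `Ref<'a, T>` is an input type of `read`, so the compiler assumes it is
+// well-formed; that assumption yields `T: 'a`, and we do not repeat it here.
+fn read<'a, T: Debug>(r: Ref<'a, T>) {
+    println!("{:?}", r.inner);
+}
+
+fn main() {
+    let x = 42;
+    read(Ref { inner: &x });
+}
+```
+
+The trait-bound flavors described earlier in this chapter would eventually let us elide bounds like `K: Hash` in the same way.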
diff --git a/src/doc/rustc-guide/src/traits/index.md b/src/doc/rustc-guide/src/traits/index.md new file mode 100644 index 0000000000..84f812394b --- /dev/null +++ b/src/doc/rustc-guide/src/traits/index.md @@ -0,0 +1,64 @@ +# Trait solving (new-style) + +> 🚧 This chapter describes "new-style" trait solving. This is still in the +> [process of being implemented][wg]; this chapter serves as a kind of +> in-progress design document. If you would prefer to read about how the +> current trait solver works, check out +> [this other chapter](./resolution.html). 🚧 +> +> By the way, if you would like to help in hacking on the new solver, you will +> find instructions for getting involved in the +> [Traits Working Group tracking issue][wg]! + +[wg]: https://github.com/rust-lang/rust/issues/48416 + +The new-style trait solver is based on the work done in [chalk][chalk]. Chalk +recasts Rust's trait system explicitly in terms of logic programming. It does +this by "lowering" Rust code into a kind of logic program we can then execute +queries against. + +You can read more about chalk itself in the +[Overview of Chalk](./chalk-overview.md) section. + +Trait solving in rustc is based around a few key ideas: + +- [Lowering to logic](./lowering-to-logic.html), which expresses + Rust traits in terms of standard logical terms. + - The [goals and clauses](./goals-and-clauses.html) chapter + describes the precise form of rules we use, and + [lowering rules](./lowering-rules.html) gives the complete set of + lowering rules in a more reference-like form. + - [Lazy normalization](./associated-types.html), which is the + technique we use to accommodate associated types when figuring out + whether types are equal. + - [Region constraints](./regions.html), which are accumulated + during trait solving but mostly ignored. This means that trait + solving effectively ignores the precise regions involved, always – + but we still remember the constraints on them so that those + constraints can be checked by the type checker. +- [Canonical queries](./canonical-queries.html), which allow us + to solve trait problems (like "is `Foo` implemented for the type + `Bar`?") once, and then apply that same result independently in many + different inference contexts. + +> This is not a complete list of topics. See the sidebar for more. + +## Ongoing work +The design of the new-style trait solving currently happens in two places: + +**chalk**. The [chalk][chalk] repository is where we experiment with new ideas +and designs for the trait system. It primarily consists of two parts: +* a unit testing framework + for the correctness and feasibility of the logical rules defining the + new-style trait system. +* the [`chalk_engine`][chalk_engine] crate, which + defines the new-style trait solver used both in the unit testing framework + and in rustc. + +**rustc**. Once we are happy with the logical rules, we proceed to +implementing them in rustc. This mainly happens in +[`librustc_traits`][librustc_traits]. 
+ +[chalk]: https://github.com/rust-lang-nursery/chalk +[chalk_engine]: https://github.com/rust-lang-nursery/chalk/tree/master/chalk-engine +[librustc_traits]: https://github.com/rust-lang/rust/tree/master/src/librustc_traits diff --git a/src/doc/rustc-guide/src/traits/lowering-module.md b/src/doc/rustc-guide/src/traits/lowering-module.md new file mode 100644 index 0000000000..9394840511 --- /dev/null +++ b/src/doc/rustc-guide/src/traits/lowering-module.md @@ -0,0 +1,66 @@ +# The lowering module in rustc + +The program clauses described in the +[lowering rules](./lowering-rules.html) section are actually +created in the [`rustc_traits::lowering`][lowering] module. + +[lowering]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_traits/lowering/ + +## The `program_clauses_for` query + +The main entry point is the `program_clauses_for` [query], which – +given a def-id – produces a set of Chalk program clauses. These +queries are tested using a +[dedicated unit-testing mechanism, described below](#unit-tests). The +query is invoked on a `DefId` that identifies something like a trait, +an impl, or an associated item definition. It then produces and +returns a vector of program clauses. + +[query]: ../query.html + + + +## Unit tests + +Unit tests are located in [`src/test/ui/chalkify`][chalkify]. A good +example test is [the `lower_impl` test][lower_impl]. At the time of +this writing, it looked like this: + +```rust,ignore +#![feature(rustc_attrs)] + +trait Foo { } + +#[rustc_dump_program_clauses] //~ ERROR Implemented(T: Foo) :- +impl Foo for T where T: Iterator { } + +fn main() { + println!("hello"); +} +``` + +The `#[rustc_dump_program_clauses]` annotation can be attached to +anything with a def-id. (It requires the `rustc_attrs` feature.) The +compiler will then invoke the `program_clauses_for` query on that +item, and emit compiler errors that dump the clauses produced. These +errors just exist for unit-testing, as we can then leverage the +standard [ui test] mechanisms to check them. In this case, there is a +`//~ ERROR Implemented` annotation which is intentionally minimal (it +need only be a prefix of the error), but [the stderr file] contains +the full details: + +```text +error: Implemented(T: Foo) :- ProjectionEq(::Item == i32), TypeOutlives(T \ +: 'static), Implemented(T: std::iter::Iterator), Implemented(T: std::marker::Sized). + --> $DIR/lower_impl.rs:15:1 + | +LL | #[rustc_dump_program_clauses] //~ ERROR Implemented(T: Foo) :- + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +error: aborting due to previous error +``` + +[chalkify]: https://github.com/rust-lang/rust/tree/master/src/test/ui/chalkify +[lower_impl]: https://github.com/rust-lang/rust/tree/master/src/test/ui/chalkify/lower_impl.rs +[the stderr file]: https://github.com/rust-lang/rust/tree/master/src/test/ui/chalkify/lower_impl.stderr +[ui test]: ../tests/adding.html#guide-to-the-ui-tests diff --git a/src/doc/rustc-guide/src/traits/lowering-rules.md b/src/doc/rustc-guide/src/traits/lowering-rules.md new file mode 100644 index 0000000000..88a61ac4fa --- /dev/null +++ b/src/doc/rustc-guide/src/traits/lowering-rules.md @@ -0,0 +1,416 @@ +# Lowering rules + +This section gives the complete lowering rules for Rust traits into +[program clauses][pc]. It is a kind of reference. These rules +reference the [domain goals][dg] defined in an earlier section. 
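+
+Before getting into the notation, here is a quick taste of the kind of output these rules produce. The example is our own (the `Clone2` and `MyVec` names are invented to keep it self-contained), and the generated clause is written as a comment, in the style used elsewhere in this guide:
+
+```rust
+trait Clone2 { }
+
+struct MyVec<T> {
+    data: Vec<T>,
+}
+
+// Per "Rule Implemented-From-Impl" below, this impl lowers to roughly:
+//
+//     forall<T> { Implemented(MyVec<T>: Clone2) :- Implemented(T: Clone2) }
+impl<T> Clone2 for MyVec<T> where T: Clone2 { }
+
+fn main() {
+    let _example = MyVec { data: vec![1_u32, 2, 3] };
+}
+```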
+ +[pc]: ./goals-and-clauses.html +[dg]: ./goals-and-clauses.html#domain-goals + +## Notation + +The nonterminal `Pi` is used to mean some generic *parameter*, either a +named lifetime like `'a` or a type parameter like `A`. + +The nonterminal `Ai` is used to mean some generic *argument*, which +might be a lifetime like `'a` or a type like `Vec`. + +When defining the lowering rules, we will give goals and clauses in +the [notation given in this section](./goals-and-clauses.html). +We sometimes insert "macros" like `LowerWhereClause!` into these +definitions; these macros reference other sections within this chapter. + +## Rule names and cross-references + +Each of these lowering rules is given a name, documented with a +comment like so: + + // Rule Foo-Bar-Baz + +The reference implementation of these rules is to be found in +[`chalk/src/rules.rs`][chalk_rules]. They are also ported in rustc in the +[`librustc_traits`][librustc_traits] crate. + +[chalk_rules]: https://github.com/rust-lang-nursery/chalk/blob/master/src/rules.rs +[librustc_traits]: https://github.com/rust-lang/rust/tree/master/src/librustc_traits + +## Lowering where clauses + +When used in a goal position, where clauses can be mapped directly to +the `Holds` variant of [domain goals][dg], as follows: + +- `A0: Foo` maps to `Implemented(A0: Foo)` +- `T: 'r` maps to `Outlives(T, 'r)` +- `'a: 'b` maps to `Outlives('a, 'b)` +- `A0: Foo` is a bit special and expands to two distinct + goals, namely `Implemented(A0: Foo)` and + `ProjectionEq(>::Item = T)` + +In the rules below, we will use `WC` to indicate where clauses that +appear in Rust syntax; we will then use the same `WC` to indicate +where those where clauses appear as goals in the program clauses that +we are producing. In that case, the mapping above is used to convert +from the Rust syntax into goals. + +### Transforming the lowered where clauses + +In addition, in the rules below, we sometimes do some transformations +on the lowered where clauses, as defined here: + +- `FromEnv(WC)` – this indicates that: + - `Implemented(TraitRef)` becomes `FromEnv(TraitRef)` + - other where-clauses are left intact +- `WellFormed(WC)` – this indicates that: + - `Implemented(TraitRef)` becomes `WellFormed(TraitRef)` + - other where-clauses are left intact + +*TODO*: I suspect that we want to alter the outlives relations too, +but Chalk isn't modeling those right now. + +## Lowering traits + +Given a trait definition + +```rust,ignore +trait Trait // P0 == Self +where WC +{ + // trait items +} +``` + +we will produce a number of declarations. This section is focused on +the program clauses for the trait header (i.e., the stuff outside the +`{}`); the [section on trait items](#trait-items) covers the stuff +inside the `{}`. + +### Trait header + +From the trait itself we mostly make "meta" rules that setup the +relationships between different kinds of domain goals. The first such +rule from the trait header creates the mapping between the `FromEnv` +and `Implemented` predicates: + +```text +// Rule Implemented-From-Env +forall { + Implemented(Self: Trait) :- FromEnv(Self: Trait) +} +``` + + + +#### Implied bounds + +The next few clauses have to do with implied bounds (see also +[RFC 2089] and the [implied bounds][implied_bounds] chapter for a more in depth +cover). 
For each trait, we produce two clauses: + +[RFC 2089]: https://rust-lang.github.io/rfcs/2089-implied-bounds.html +[implied_bounds]: ./implied-bounds.md + +```text +// Rule Implied-Bound-From-Trait +// +// For each where clause WC: +forall { + FromEnv(WC) :- FromEnv(Self: Trait { + WellFormed(Self: Trait) :- Implemented(Self: Trait) && WellFormed(WC) +} +``` + +This `WellFormed` rule states that `T: Trait` is well-formed if (a) +`T: Trait` is implemented and (b) all the where-clauses declared on +`Trait` are well-formed (and hence they are implemented). Remember +that the `WellFormed` predicate is +[coinductive](./goals-and-clauses.html#coinductive); in this +case, it is serving as a kind of "carrier" that allows us to enumerate +all the where clauses that are transitively implied by `T: Trait`. + +An example: + +```rust,ignore +trait Foo: A + Bar { } +trait Bar: B + Foo { } +trait A { } +trait B { } +``` + +Here, the transitive set of implications for `T: Foo` are `T: A`, `T: Bar`, and +`T: B`. And indeed if we were to try to prove `WellFormed(T: Foo)`, we would +have to prove each one of those: + +- `WellFormed(T: Foo)` + - `Implemented(T: Foo)` + - `WellFormed(T: A)` + - `Implemented(T: A)` + - `WellFormed(T: Bar)` + - `Implemented(T: Bar)` + - `WellFormed(T: B)` + - `Implemented(T: Bar)` + - `WellFormed(T: Foo)` -- cycle, true coinductively + +This `WellFormed` predicate is only used when proving that impls are +well-formed – basically, for each impl of some trait ref `TraitRef`, +we must show that `WellFormed(TraitRef)`. This in turn justifies the +implied bounds rules that allow us to extend the set of `FromEnv` +items. + +## Lowering type definitions + +We also want to have some rules which define when a type is well-formed. +For example, given this type: + +```rust,ignore +struct Set where K: Hash { ... } +``` + +then `Set` is well-formed because `i32` implements `Hash`, but +`Set` would not be well-formed. Basically, a type is well-formed +if its parameters verify the where clauses written on the type definition. + +Hence, for every type definition: + +```rust, ignore +struct Type where WC { ... } +``` + +we produce the following rule: + +```text +// Rule WellFormed-Type +forall { + WellFormed(Type) :- WC +} +``` + +Note that we use `struct` for defining a type, but this should be understood +as a general type definition (it could be e.g. a generic `enum`). + +Conversely, we define rules which say that if we assume that a type is +well-formed, we can also assume that its where clauses hold. That is, +we produce the following family of rules: + +```text +// Rule Implied-Bound-From-Type +// +// For each where clause `WC` +forall { + FromEnv(WC) :- FromEnv(Type) +} +``` + +As for the implied bounds RFC, functions will *assume* that their arguments +are well-formed. For example, suppose we have the following bit of code: + +```rust,ignore +trait Hash: Eq { } +struct Set { ... } + +fn foo(collection: Set, x: K, y: K) { + // `x` and `y` can be equalized even if we did not explicitly write + // `where K: Eq` + if x == y { + ... + } +} +``` + +In the `foo` function, we assume that `Set` is well-formed, i.e. we have +`FromEnv(Set)` in our environment. Because of the previous rule, we get + `FromEnv(K: Hash)` without needing an explicit where clause. 
And because +of the `Hash` trait definition, there also exists a rule which says: + +```text +forall { + FromEnv(K: Eq) :- FromEnv(K: Hash) +} +``` + +which means that we finally get `FromEnv(K: Eq)` and then can compare `x` +and `y` without needing an explicit where clause. + + + +## Lowering trait items + +### Associated type declarations + +Given a trait that declares a (possibly generic) associated type: + +```rust,ignore +trait Trait // P0 == Self +where WC +{ + type AssocType: Bounds where WC1; +} +``` + +We will produce a number of program clauses. The first two define +the rules by which `ProjectionEq` can succeed; these two clauses are discussed +in detail in the [section on associated types](./associated-types.html), +but reproduced here for reference: + +```text +// Rule ProjectionEq-Normalize +// +// ProjectionEq can succeed by normalizing: +forall { + ProjectionEq(>::AssocType = U) :- + Normalize(>::AssocType -> U) +} +``` + +```text +// Rule ProjectionEq-Placeholder +// +// ProjectionEq can succeed through the placeholder associated type, +// see "associated type" chapter for more: +forall { + ProjectionEq( + >::AssocType = + (Trait::AssocType) + ) +} +``` + +The next rule covers implied bounds for the projection. In particular, +the `Bounds` declared on the associated type must have been proven to hold +to show that the impl is well-formed, and hence we can rely on them +elsewhere. + +```text +// Rule Implied-Bound-From-AssocTy +// +// For each `Bound` in `Bounds`: +forall { + FromEnv(>::AssocType>: Bound) :- + FromEnv(Self: Trait) && WC1 +} +``` + +Next, we define the requirements for an instantiation of our associated +type to be well-formed... + +```text +// Rule WellFormed-AssocTy +forall { + WellFormed((Trait::AssocType)) :- + Implemented(Self: Trait) && WC1 +} +``` + +...along with the reverse implications, when we can assume that it is +well-formed. + +```text +// Rule Implied-WC-From-AssocTy +// +// For each where clause WC1: +forall { + FromEnv(WC1) :- FromEnv((Trait::AssocType)) +} +``` + +```text +// Rule Implied-Trait-From-AssocTy +forall { + FromEnv(Self: Trait) :- + FromEnv((Trait::AssocType)) +} +``` + +### Lowering function and constant declarations + +Chalk didn't model functions and constants, but I would eventually like to +treat them exactly like normalization. See [the section on function/constant +values below](#constant-vals) for more details. + +## Lowering impls + +Given an impl of a trait: + +```rust,ignore +impl Trait for A0 +where WC +{ + // zero or more impl items +} +``` + +Let `TraitRef` be the trait reference `A0: Trait`. Then we +will create the following rules: + +```text +// Rule Implemented-From-Impl +forall { + Implemented(TraitRef) :- WC +} +``` + +In addition, we will lower all of the *impl items*. + +## Lowering impl items + +### Associated type values + +Given an impl that contains: + +```rust,ignore +impl Trait for P0 +where WC_impl +{ + type AssocType = T; +} +``` + +and our where clause `WC1` on the trait associated type from above, we +produce the following rule: + +```text +// Rule Normalize-From-Impl +forall { + forall { + Normalize(>::AssocType -> T) :- + Implemented(P0 as Trait) && WC1 + } +} +``` + +Note that `WC_impl` and `WC1` both encode where-clauses that the impl can +rely on. (`WC_impl` is not used here, because it is implied by +`Implemented(P0 as Trait)`.) + + + +### Function and constant values + +Chalk didn't model functions and constants, but I would eventually +like to treat them exactly like normalization. 
This presumably +involves adding a new kind of parameter (constant), and then having a +`NormalizeValue` domain goal. This is *to be written* because the +details are a bit up in the air. diff --git a/src/doc/rustc-guide/src/traits/lowering-to-logic.md b/src/doc/rustc-guide/src/traits/lowering-to-logic.md new file mode 100644 index 0000000000..e1a6c1361c --- /dev/null +++ b/src/doc/rustc-guide/src/traits/lowering-to-logic.md @@ -0,0 +1,185 @@ +# Lowering to logic + +The key observation here is that the Rust trait system is basically a +kind of logic, and it can be mapped onto standard logical inference +rules. We can then look for solutions to those inference rules in a +very similar fashion to how e.g. a [Prolog] solver works. It turns out +that we can't *quite* use Prolog rules (also called Horn clauses) but +rather need a somewhat more expressive variant. + +[Prolog]: https://en.wikipedia.org/wiki/Prolog + +## Rust traits and logic + +One of the first observations is that the Rust trait system is +basically a kind of logic. As such, we can map our struct, trait, and +impl declarations into logical inference rules. For the most part, +these are basically Horn clauses, though we'll see that to capture the +full richness of Rust – and in particular to support generic +programming – we have to go a bit further than standard Horn clauses. + +To see how this mapping works, let's start with an example. Imagine +we declare a trait and a few impls, like so: + +```rust +trait Clone { } +impl Clone for usize { } +impl Clone for Vec where T: Clone { } +``` + +We could map these declarations to some Horn clauses, written in a +Prolog-like notation, as follows: + +```text +Clone(usize). +Clone(Vec) :- Clone(?T). + +// The notation `A :- B` means "A is true if B is true". +// Or, put another way, B implies A. +``` + +In Prolog terms, we might say that `Clone(Foo)` – where `Foo` is some +Rust type – is a *predicate* that represents the idea that the type +`Foo` implements `Clone`. These rules are **program clauses**; they +state the conditions under which that predicate can be proven (i.e., +considered true). So the first rule just says "Clone is implemented +for `usize`". The next rule says "for any type `?T`, Clone is +implemented for `Vec` if clone is implemented for `?T`". So +e.g. if we wanted to prove that `Clone(Vec>)`, we would do +so by applying the rules recursively: + +- `Clone(Vec>)` is provable if: + - `Clone(Vec)` is provable if: + - `Clone(usize)` is provable. (Which it is, so we're all good.) + +But now suppose we tried to prove that `Clone(Vec)`. This would +fail (after all, I didn't give an impl of `Clone` for `Bar`): + +- `Clone(Vec)` is provable if: + - `Clone(Bar)` is provable. (But it is not, as there are no applicable rules.) + +We can easily extend the example above to cover generic traits with +more than one input type. So imagine the `Eq` trait, which declares +that `Self` is equatable with a value of type `T`: + +```rust,ignore +trait Eq { ... } +impl Eq for usize { } +impl> Eq> for Vec { } +``` + +That could be mapped as follows: + +```text +Eq(usize, usize). +Eq(Vec, Vec) :- Eq(?T, ?U). +``` + +So far so good. + +## Type-checking normal functions + +OK, now that we have defined some logical rules that are able to +express when traits are implemented and to handle associated types, +let's turn our focus a bit towards **type-checking**. Type-checking is +interesting because it is what gives us the goals that we need to +prove. 
That is, everything we've seen so far has been about how we +derive the rules by which we can prove goals from the traits and impls +in the program; but we are also interested in how to derive the goals +that we need to prove, and those come from type-checking. + +Consider type-checking the function `foo()` here: + +```rust,ignore +fn foo() { bar::() } +fn bar>() { } +``` + +This function is very simple, of course: all it does is to call +`bar::()`. Now, looking at the definition of `bar()`, we can see +that it has one where-clause `U: Eq`. So, that means that `foo()` will +have to prove that `usize: Eq` in order to show that it can call `bar()` +with `usize` as the type argument. + +If we wanted, we could write a Prolog predicate that defines the +conditions under which `bar()` can be called. We'll say that those +conditions are called being "well-formed": + +```text +barWellFormed(?U) :- Eq(?U, ?U). +``` + +Then we can say that `foo()` type-checks if the reference to +`bar::` (that is, `bar()` applied to the type `usize`) is +well-formed: + +```text +fooTypeChecks :- barWellFormed(usize). +``` + +If we try to prove the goal `fooTypeChecks`, it will succeed: + +- `fooTypeChecks` is provable if: + - `barWellFormed(usize)`, which is provable if: + - `Eq(usize, usize)`, which is provable because of an impl. + +Ok, so far so good. Let's move on to type-checking a more complex function. + +## Type-checking generic functions: beyond Horn clauses + +In the last section, we used standard Prolog horn-clauses (augmented with Rust's +notion of type equality) to type-check some simple Rust functions. But that only +works when we are type-checking non-generic functions. If we want to type-check +a generic function, it turns out we need a stronger notion of goal than what Prolog +can provide. To see what I'm talking about, let's revamp our previous +example to make `foo` generic: + +```rust,ignore +fn foo>() { bar::() } +fn bar>() { } +``` + +To type-check the body of `foo`, we need to be able to hold the type +`T` "abstract". That is, we need to check that the body of `foo` is +type-safe *for all types `T`*, not just for some specific type. We might express +this like so: + +```text +fooTypeChecks :- + // for all types T... + forall { + // ...if we assume that Eq(T, T) is provable... + if (Eq(T, T)) { + // ...then we can prove that `barWellFormed(T)` holds. + barWellFormed(T) + } + }. +``` + +This notation I'm using here is the notation I've been using in my +prototype implementation; it's similar to standard mathematical +notation but a bit Rustified. Anyway, the problem is that standard +Horn clauses don't allow universal quantification (`forall`) or +implication (`if`) in goals (though many Prolog engines do support +them, as an extension). For this reason, we need to accept something +called "first-order hereditary harrop" (FOHH) clauses – this long +name basically means "standard Horn clauses with `forall` and `if` in +the body". But it's nice to know the proper name, because there is a +lot of work describing how to efficiently handle FOHH clauses; see for +example Gopalan Nadathur's excellent +["A Proof Procedure for the Logic of Hereditary Harrop Formulas"][pphhf] +in [the bibliography]. + +[the bibliography]: ./bibliography.html +[pphhf]: ./bibliography.html#pphhf + +It turns out that supporting FOHH is not really all that hard. And +once we are able to do that, we can easily describe the type-checking +rule for generic functions like `foo` in our logic. 
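+
+For reference, here is a runnable version of that generic example. It is our own sketch, with the trait renamed to `Eq2` so that it does not collide with `std::cmp::Eq`:
+
+```rust
+// Our `Eq2<T>` stands in for the `Eq<T>` trait of the example above.
+trait Eq2<T> { }
+
+impl Eq2<usize> for usize { }
+impl<T: Eq2<T>> Eq2<Vec<T>> for Vec<T> { }
+
+fn bar<U: Eq2<U>>() { }
+
+// Type-checking this body is the `forall<T> { if (Eq2(T, T)) { ... } }` goal
+// from above: `T` is held abstract and `Eq2(T, T)` is assumed.
+fn foo<T: Eq2<T>>() {
+    bar::<T>()
+}
+
+fn main() {
+    foo::<usize>();      // requires proving `Eq2(usize, usize)`
+    foo::<Vec<usize>>(); // requires proving `Eq2(Vec<usize>, Vec<usize>)`
+}
+```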
+ +## Source + +This page is a lightly adapted version of a +[blog post by Nicholas Matsakis][lrtl]. + +[lrtl]: http://smallcultfollowing.com/babysteps/blog/2017/01/26/lowering-rust-traits-to-logic/ diff --git a/src/doc/rustc-guide/src/traits/regions.md b/src/doc/rustc-guide/src/traits/regions.md new file mode 100644 index 0000000000..4657529dc2 --- /dev/null +++ b/src/doc/rustc-guide/src/traits/regions.md @@ -0,0 +1,9 @@ +# Region constraints + +*To be written.* + +Chalk does not have the concept of region constraints, and as of this +writing, work on rustc was not far enough to worry about them. + +In the meantime, you can read about region constraints in the +[type inference](../type-inference.html#region-constraints) section. diff --git a/src/doc/rustc-guide/src/traits/resolution.md b/src/doc/rustc-guide/src/traits/resolution.md new file mode 100644 index 0000000000..2ba4516779 --- /dev/null +++ b/src/doc/rustc-guide/src/traits/resolution.md @@ -0,0 +1,312 @@ +# Trait resolution (old-style) + +This chapter describes the general process of _trait resolution_ and points out +some non-obvious things. + +**Note:** This chapter (and its subchapters) describe how the trait +solver **currently** works. However, we are in the process of +designing a new trait solver. If you'd prefer to read about *that*, +see [*this* traits chapter](./index.html). + +## Major concepts + +Trait resolution is the process of pairing up an impl with each +reference to a trait. So, for example, if there is a generic function like: + +```rust,ignore +fn clone_slice(x: &[T]) -> Vec { ... } +``` + +and then a call to that function: + +```rust,ignore +let v: Vec = clone_slice(&[1, 2, 3]) +``` + +it is the job of trait resolution to figure out whether there exists an impl of +(in this case) `isize : Clone`. + +Note that in some cases, like generic functions, we may not be able to +find a specific impl, but we can figure out that the caller must +provide an impl. For example, consider the body of `clone_slice`: + +```rust,ignore +fn clone_slice(x: &[T]) -> Vec { + let mut v = Vec::new(); + for e in &x { + v.push((*e).clone()); // (*) + } +} +``` + +The line marked `(*)` is only legal if `T` (the type of `*e`) +implements the `Clone` trait. Naturally, since we don't know what `T` +is, we can't find the specific impl; but based on the bound `T:Clone`, +we can say that there exists an impl which the caller must provide. + +We use the term *obligation* to refer to a trait reference in need of +an impl. Basically, the trait resolution system resolves an obligation +by proving that an appropriate impl does exist. + +During type checking, we do not store the results of trait selection. +We simply wish to verify that trait selection will succeed. Then +later, at trans time, when we have all concrete types available, we +can repeat the trait selection to choose an actual implementation, which +will then be generated in the output binary. + +## Overview + +Trait resolution consists of three major parts: + +- **Selection**: Deciding how to resolve a specific obligation. For + example, selection might decide that a specific obligation can be + resolved by employing an impl which matches the `Self` type, or by using a + parameter bound (e.g. `T: Trait`). In the case of an impl, selecting one + obligation can create *nested obligations* because of where clauses + on the impl itself. It may also require evaluating those nested + obligations to resolve ambiguities. 
+ +- **Fulfillment**: The fulfillment code is what tracks that obligations + are completely fulfilled. Basically it is a worklist of obligations + to be selected: once selection is successful, the obligation is + removed from the worklist and any nested obligations are enqueued. + +- **Coherence**: The coherence checks are intended to ensure that there + are never overlapping impls, where two impls could be used with + equal precedence. + +## Selection + +Selection is the process of deciding whether an obligation can be +resolved and, if so, how it is to be resolved (via impl, where clause, etc). +The main interface is the `select()` function, which takes an obligation +and returns a `SelectionResult`. There are three possible outcomes: + +- `Ok(Some(selection))` – yes, the obligation can be resolved, and + `selection` indicates how. If the impl was resolved via an impl, + then `selection` may also indicate nested obligations that are required + by the impl. + +- `Ok(None)` – we are not yet sure whether the obligation can be + resolved or not. This happens most commonly when the obligation + contains unbound type variables. + +- `Err(err)` – the obligation definitely cannot be resolved due to a + type error or because there are no impls that could possibly apply. + +The basic algorithm for selection is broken into two big phases: +candidate assembly and confirmation. + +Note that because of how lifetime inference works, it is not possible to +give back immediate feedback as to whether a unification or subtype +relationship between lifetimes holds or not. Therefore, lifetime +matching is *not* considered during selection. This is reflected in +the fact that subregion assignment is infallible. This may yield +lifetime constraints that will later be found to be in error (in +contrast, the non-lifetime-constraints have already been checked +during selection and can never cause an error, though naturally they +may lead to other errors downstream). + +### Candidate assembly + +Searches for impls/where-clauses/etc that might +possibly be used to satisfy the obligation. Each of those is called +a candidate. To avoid ambiguity, we want to find exactly one +candidate that is definitively applicable. In some cases, we may not +know whether an impl/where-clause applies or not – this occurs when +the obligation contains unbound inference variables. + +The subroutines that decide whether a particular impl/where-clause/etc +applies to a particular obligation are collectively referred to as the +process of _matching_. At the moment, this amounts to +unifying the `Self` types, but in the future we may also recursively +consider some of the nested obligations, in the case of an impl. + +**TODO**: what does "unifying the `Self` types" mean? The `Self` of the +obligation with that of an impl? + +The basic idea for candidate assembly is to do a first pass in which +we identify all possible candidates. During this pass, all that we do +is try and unify the type parameters. (In particular, we ignore any +nested where clauses.) Presuming that this unification succeeds, the +impl is added as a candidate. + +Once this first pass is done, we can examine the set of candidates. If +it is a singleton set, then we are done: this is the only impl in +scope that could possibly apply. Otherwise, we can winnow down the set +of candidates by using where clauses and other conditions. If this +reduced set yields a single, unambiguous entry, we're good to go, +otherwise the result is considered ambiguous. 
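As a rough sketch of the shape of this two-phase process, the snippet below
mirrors "assemble, then winnow" in miniature. The types and function names here
are invented for illustration and do not correspond to rustc's real selection
code.

```rust,ignore
// Illustration-only stand-ins for rustc's obligation/candidate types.
#[derive(Clone, Debug)]
enum Candidate {
    Impl(&'static str),
    WhereClause(&'static str),
}

struct Obligation;

// Phase 1: gather everything whose `Self` type unifies with the obligation,
// ignoring nested where clauses for now.
fn assemble_candidates(_obligation: &Obligation) -> Vec<Candidate> {
    vec![
        Candidate::Impl("impl<T> Get for T"),
        Candidate::Impl("impl<T: Get> Get for Box<T>"),
    ]
}

// Phase 2: winnow the set down using where clauses and other conditions.
// A single survivor means an unambiguous selection; anything else is an
// error or an ambiguity.
fn winnow(
    candidates: Vec<Candidate>,
    applies: impl Fn(&Candidate) -> bool,
) -> Option<Candidate> {
    let mut remaining: Vec<_> = candidates.into_iter().filter(|c| applies(c)).collect();
    if remaining.len() == 1 {
        remaining.pop()
    } else {
        None
    }
}
```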
+ +#### The basic process: Inferring based on the impls we see + +This process is easier if we work through some examples. Consider +the following trait: + +```rust,ignore +trait Convert { + fn convert(&self) -> Target; +} +``` + +This trait just has one method. It's about as simple as it gets. It +converts from the (implicit) `Self` type to the `Target` type. If we +wanted to permit conversion between `isize` and `usize`, we might +implement `Convert` like so: + +```rust,ignore +impl Convert for isize { ... } // isize -> usize +impl Convert for usize { ... } // usize -> isize +``` + +Now imagine there is some code like the following: + +```rust,ignore +let x: isize = ...; +let y = x.convert(); +``` + +The call to convert will generate a trait reference `Convert<$Y> for +isize`, where `$Y` is the type variable representing the type of +`y`. Of the two impls we can see, the only one that matches is +`Convert for isize`. Therefore, we can +select this impl, which will cause the type of `$Y` to be unified to +`usize`. (Note that while assembling candidates, we do the initial +unifications in a transaction, so that they don't affect one another.) + +**TODO**: The example says we can "select" the impl, but this section is +talking specifically about candidate assembly. Does this mean we can sometimes +skip confirmation? Or is this poor wording? +**TODO**: Is the unification of `$Y` part of trait resolution or type +inference? Or is this not the same type of "inference variable" as in type +inference? + +#### Winnowing: Resolving ambiguities + +But what happens if there are multiple impls where all the types +unify? Consider this example: + +```rust,ignore +trait Get { + fn get(&self) -> Self; +} + +impl Get for T { + fn get(&self) -> T { *self } +} + +impl Get for Box { + fn get(&self) -> Box { Box::new(get_it(&**self)) } +} +``` + +What happens when we invoke `get_it(&Box::new(1_u16))`, for example? In this +case, the `Self` type is `Box` – that unifies with both impls, +because the first applies to all types `T`, and the second to all +`Box`. In order for this to be unambiguous, the compiler does a *winnowing* +pass that considers `where` clauses +and attempts to remove candidates. In this case, the first impl only +applies if `Box : Copy`, which doesn't hold. After winnowing, +then, we are left with just one candidate, so we can proceed. + +#### `where` clauses + +Besides an impl, the other major way to resolve an obligation is via a +where clause. The selection process is always given a [parameter +environment] which contains a list of where clauses, which are +basically obligations that we can assume are satisfiable. We will iterate +over that list and check whether our current obligation can be found +in that list. If so, it is considered satisfied. More precisely, we +want to check whether there is a where-clause obligation that is for +the same trait (or some subtrait) and which can match against the obligation. + +[parameter environment]: ../param_env.html + +Consider this simple example: + +```rust,ignore +trait A1 { + fn do_a1(&self); +} +trait A2 : A1 { ... } + +trait B { + fn do_b(&self); +} + +fn foo(x: X) { + x.do_a1(); // (*) + x.do_b(); // (#) +} +``` + +In the body of `foo`, clearly we can use methods of `A1`, `A2`, or `B` +on variable `x`. The line marked `(*)` will incur an obligation `X: A1`, +while the line marked `(#)` will incur an obligation `X: B`. Meanwhile, +the parameter environment will contain two where-clauses: `X : A2` and `X : B`. 
+For each obligation, then, we search this list of where-clauses. The +obligation `X: B` trivially matches against the where-clause `X: B`. +To resolve an obligation `X:A1`, we would note that `X:A2` implies that `X:A1`. + +### Confirmation + +_Confirmation_ unifies the output type parameters of the trait with the +values found in the obligation, possibly yielding a type error. + +Suppose we have the following variation of the `Convert` example in the +previous section: + +```rust,ignore +trait Convert { + fn convert(&self) -> Target; +} + +impl Convert for isize { ... } // isize -> usize +impl Convert for usize { ... } // usize -> isize + +let x: isize = ...; +let y: char = x.convert(); // NOTE: `y: char` now! +``` + +Confirmation is where an error would be reported because the impl specified +that `Target` would be `usize`, but the obligation reported `char`. Hence the +result of selection would be an error. + +Note that the candidate impl is chosen based on the `Self` type, but +confirmation is done based on (in this case) the `Target` type parameter. + +### Selection during translation + +As mentioned above, during type checking, we do not store the results of trait +selection. At trans time, we repeat the trait selection to choose a particular +impl for each method call. In this second selection, we do not consider any +where-clauses to be in scope because we know that each resolution will resolve +to a particular impl. + +One interesting twist has to do with nested obligations. In general, in trans, +we only need to do a "shallow" selection for an obligation. That is, we wish to +identify which impl applies, but we do not (yet) need to decide how to select +any nested obligations. Nonetheless, we *do* currently do a complete resolution, +and that is because it can sometimes inform the results of type inference. +That is, we do not have the full substitutions in terms of the type variables +of the impl available to us, so we must run trait selection to figure +everything out. + +**TODO**: is this still talking about trans? + +Here is an example: + +```rust,ignore +trait Foo { ... } +impl> Foo for Vec { ... } + +impl Bar for isize { ... } +``` + +After one shallow round of selection for an obligation like `Vec +: Foo`, we would know which impl we want, and we would know that +`T=isize`, but we do not know the type of `U`. We must select the +nested obligation `isize : Bar` to find out that `U=usize`. + +It would be good to only do *just as much* nested resolution as +necessary. Currently, though, we just do a full resolution. diff --git a/src/doc/rustc-guide/src/traits/slg.md b/src/doc/rustc-guide/src/traits/slg.md new file mode 100644 index 0000000000..dcddd01f58 --- /dev/null +++ b/src/doc/rustc-guide/src/traits/slg.md @@ -0,0 +1,302 @@ +# The On-Demand SLG solver + +Given a set of program clauses (provided by our [lowering rules][lowering]) +and a query, we need to return the result of the query and the value of any +type variables we can determine. This is the job of the solver. + +For example, `exists { Vec: FromIterator }` has one solution, so +its result is `Unique; substitution [?T := u32]`. A solution also comes with +a set of region constraints, which we'll ignore in this introduction. + +[lowering]: ./lowering-rules.html + +## Goals of the Solver + +### On demand + +There are often many, or even infinitely many, solutions to a query. For +example, say we want to prove that `exists { Vec: Debug }` for _some_ +type `?T`. 
Our solver should be capable of yielding one answer at a time, say
`?T = u32`, then `?T = i32`, and so on, rather than iterating over every type
in the type system. If we need more answers, we can request more until we are
done. This is similar to how Prolog works.

*See also: [The traditional, interactive Prolog query][pq]*

[pq]: ./canonical-queries.html#the-traditional-interactive-prolog-query

### Breadth-first

`Vec<?T>: Debug` is true if `?T: Debug`. This leads to a cycle: `[Vec<u32>,
Vec<Vec<u32>>, Vec<Vec<Vec<u32>>>]`, and so on all implement `Debug`. Our
solver ought to be breadth first and consider answers like `[Vec<u32>: Debug,
Vec<i32>: Debug, ...]` before it recurses, or we may never find the answer
we're looking for.

### Cachable

To speed up compilation, we need to cache results, including partial results
left over from past solver queries.

## Description of how it works

The basis of the solver is the [`Forest`] type. A *forest* stores a
collection of *tables* as well as a *stack*. Each *table* represents
the stored results of a particular query that is being performed, as
well as the various *strands*, which are basically suspended
computations that may be used to find more answers. Tables are
interdependent: solving one query may require solving others.

[`Forest`]: https://rust-lang-nursery.github.io/chalk/doc/chalk_engine/forest/struct.Forest.html

### Walkthrough

Perhaps the easiest way to explain how the solver works is to walk
through an example. Let's imagine that we have the following program:

```rust,ignore
trait Debug { }

struct u32 { }
impl Debug for u32 { }

struct Rc<T> { }
impl<T: Debug> Debug for Rc<T> { }

struct Vec<T> { }
impl<T: Debug> Debug for Vec<T> { }
```

Now imagine that we want to find answers for the query `exists<T> { Rc<T>:
Debug }`. The first step would be to u-canonicalize this query; this is the
act of giving canonical names to all the unbound inference variables based on
the order of their left-most appearance, as well as canonicalizing the
universes of any universally bound names (e.g., the `T` in `forall<T> { ...
}`). In this case, there are no universally bound names, but the canonical
form Q of the query might look something like:

```text
Rc<?0>: Debug
```

where `?0` is a variable in the root universe U0. We would then go and
look for a table with this canonical query as the key: since the forest is
empty, this lookup will fail, and we will create a new table T0,
corresponding to the u-canonical goal Q.

**Ignoring negative reasoning and regions.** To start, we'll ignore
the possibility of negative goals like `not { Foo }`. We'll phase them
in later, as they bring several complications.

**Creating a table.** When we first create a table, we also initialize
it with a set of *initial strands*. A "strand" is kind of like a
"thread" for the solver: it contains a particular way to produce an
answer. The initial set of strands for a goal like `Rc<?T>: Debug`
(i.e., a "domain goal") is determined by looking for *clauses* in the
environment. In Rust, these clauses derive from impls, but also from
where-clauses that are in scope. In the case of our example, there
would be three clauses, each coming from the program. Using a
Prolog-like notation, these look like:

```text
(u32: Debug).
(Rc<T>: Debug) :- (T: Debug).
(Vec<T>: Debug) :- (T: Debug).
```

To create our initial strands, then, we will try to apply each of
these clauses to our goal of `Rc<?T>: Debug`.
The first and third +clauses are inapplicable because `u32` and `Vec` cannot be unified +with `Rc`. The second clause, however, will work. + +**What is a strand?** Let's talk a bit more about what a strand *is*. In the code, a strand +is the combination of an inference table, an _X-clause_, and (possibly) +a selected subgoal from that X-clause. But what is an X-clause +([`ExClause`], in the code)? An X-clause pulls together a few things: + +- The current state of the goal we are trying to prove; +- A set of subgoals that have yet to be proven; +- There are also a few things we're ignoring for now: + - delayed literals, region constraints + +The general form of an X-clause is written much like a Prolog clause, +but with somewhat different semantics. Since we're ignoring delayed +literals and region constraints, an X-clause just looks like this: + +```text +G :- L +``` + +where G is a goal and L is a set of subgoals that must be proven. +(The L stands for *literal* -- when we address negative reasoning, a +literal will be either a positive or negative subgoal.) The idea is +that if we are able to prove L then the goal G can be considered true. + +In the case of our example, we would wind up creating one strand, with +an X-clause like so: + +```text +(Rc: Debug) :- (?T: Debug) +``` + +Here, the `?T` refers to one of the inference variables created in the +inference table that accompanies the strand. (I'll use named variables +to refer to inference variables, and numbered variables like `?0` to +refer to variables in a canonicalized goal; in the code, however, they +are both represented with an index.) + +For each strand, we also optionally store a *selected subgoal*. This +is the subgoal after the turnstile (`:-`) that we are currently trying +to prove in this strand. Initally, when a strand is first created, +there is no selected subgoal. + +[`ExClause`]: https://rust-lang-nursery.github.io/chalk/doc/chalk_engine/struct.ExClause.html + +**Activating a strand.** Now that we have created the table T0 and +initialized it with strands, we have to actually try and produce an answer. +We do this by invoking the [`ensure_root_answer`] operation on the table: +specifically, we say `ensure_root_answer(T0, A0)`, meaning "ensure that there +is a 0th answer A0 to query T0". + +Remember that tables store not only strands, but also a vector of cached +answers. The first thing that [`ensure_root_answer`] does is to check whether +answer A0 is in this vector. If so, we can just return immediately. In this +case, the vector will be empty, and hence that does not apply (this becomes +important for cyclic checks later on). + +When there is no cached answer, [`ensure_root_answer`] will try to produce one. +It does this by selecting a strand from the set of active strands -- the +strands are stored in a `VecDeque` and hence processed in a round-robin +fashion. Right now, we have only one strand, storing the following X-clause +with no selected subgoal: + +```text +(Rc: Debug) :- (?T: Debug) +``` + +When we activate the strand, we see that we have no selected subgoal, +and so we first pick one of the subgoals to process. Here, there is only +one (`?T: Debug`), so that becomes the selected subgoal, changing +the state of the strand to: + +```text +(Rc: Debug) :- selected(?T: Debug, A0) +``` + +Here, we write `selected(L, An)` to indicate that (a) the literal `L` +is the selected subgoal and (b) which answer `An` we are looking for. We +start out looking for `A0`. 
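Putting the pieces above together, a strand and its X-clause could be sketched
roughly as below. These are deliberately simplified, invented types; the real
`ExClause` in chalk_engine carries more state (e.g. the delayed literals and
region constraints we are ignoring here).

```rust,ignore
// A very reduced picture of the structures described above (illustration only).
struct InferenceTable; // stands in for the strand's inference context

struct ExClause {
    /// The goal `G` we are trying to prove, e.g. `Rc<?T>: Debug`.
    goal: String,
    /// The subgoals `L` that remain to be proven, e.g. `[?T: Debug]`.
    subgoals: Vec<String>,
}

struct Strand {
    infer: InferenceTable,
    ex_clause: ExClause,
    /// The `selected(L, An)` notation from the text: which subgoal is
    /// currently selected, and which answer number we are waiting for.
    /// `None` when no subgoal has been selected yet.
    selected_subgoal: Option<(usize, usize)>,
}
```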
+ +[`ensure_root_answer`]: https://rust-lang-nursery.github.io/chalk/doc/chalk_engine/forest/struct.Forest.html#method.ensure_root_answer + +**Processing the selected subgoal.** Next, we have to try and find an +answer to this selected goal. To do that, we will u-canonicalize it +and try to find an associated table. In this case, the u-canonical +form of the subgoal is `?0: Debug`: we don't have a table yet for +that, so we can create a new one, T1. As before, we'll initialize T1 +with strands. In this case, there will be three strands, because all +the program clauses are potentially applicable. Those three strands +will be: + +- `(u32: Debug) :-`, derived from the program clause `(u32: Debug).`. + - Note: This strand has no subgoals. +- `(Vec: Debug) :- (?U: Debug)`, derived from the `Vec` impl. +- `(Rc: Debug) :- (?U: Debug)`, derived from the `Rc` impl. + +We can thus summarize the state of the whole forest at this point as +follows: + +```text +Table T0 [Rc: Debug] + Strands: + (Rc: Debug) :- selected(?T: Debug, A0) + +Table T1 [?0: Debug] + Strands: + (u32: Debug) :- + (Vec: Debug) :- (?U: Debug) + (Rc: Debug) :- (?V: Debug) +``` + +**Delegation between tables.** Now that the active strand from T0 has +created the table T1, it can try to extract an answer. It does this +via that same `ensure_answer` operation we saw before. In this case, +the strand would invoke `ensure_answer(T1, A0)`, since we will start +with the first answer. This will cause T1 to activate its first +strand, `u32: Debug :-`. + +This strand is somewhat special: it has no subgoals at all. This means +that the goal is proven. We can therefore add `u32: Debug` to the set +of *answers* for our table, calling it answer A0 (it is the first +answer). The strand is then removed from the list of strands. + +The state of table T1 is therefore: + +```text +Table T1 [?0: Debug] + Answers: + A0 = [?0 = u32] + Strand: + (Vec: Debug) :- (?U: Debug) + (Rc: Debug) :- (?V: Debug) +``` + +Note that I am writing out the answer A0 as a substitution that can be +applied to the table goal; actually, in the code, the goals for each +X-clause are also represented as substitutions, but in this exposition +I've chosen to write them as full goals, following [NFTD]. + +[NFTD]: ./bibliography.html#slg + +Since we now have an answer, `ensure_answer(T1, A0)` will return `Ok` +to the table T0, indicating that answer A0 is available. T0 now has +the job of incorporating that result into its active strand. It does +this in two ways. First, it creates a new strand that is looking for +the next possible answer of T1. Next, it incorpoates the answer from +A0 and removes the subgoal. The resulting state of table T0 is: + +```text +Table T0 [Rc: Debug] + Strands: + (Rc: Debug) :- selected(?T: Debug, A1) + (Rc: Debug) :- +``` + +We then immediately activate the strand that incorporated the answer +(the `Rc: Debug` one). In this case, that strand has no further +subgoals, so it becomes an answer to the table T0. This answer can +then be returned up to our caller, and the whole forest goes quiescent +at this point (remember, we only do enough work to generate *one* +answer). 
The ending state of the forest at this point will be: + +```text +Table T0 [Rc: Debug] + Answer: + A0 = [?0 = u32] + Strands: + (Rc: Debug) :- selected(?T: Debug, A1) + +Table T1 [?0: Debug] + Answers: + A0 = [?0 = u32] + Strand: + (Vec: Debug) :- (?U: Debug) + (Rc: Debug) :- (?V: Debug) +``` + +Here you can see how the forest captures both the answers we have +created thus far *and* the strands that will let us try to produce +more answers later on. + +## See also + +- [chalk_solve README][readme], which contains links to papers used and + acronyms referenced in the code +- This section is a lightly adapted version of the blog post [An on-demand + SLG solver for chalk][slg-blog] +- [Negative Reasoning in Chalk][negative-reasoning-blog] explains the need + for negative reasoning, but not how the SLG solver does it + +[readme]: https://github.com/rust-lang-nursery/chalk/blob/239e4ae4e69b2785b5f99e0f2b41fc16b0b4e65e/chalk-engine/src/README.md +[slg-blog]: http://smallcultfollowing.com/babysteps/blog/2018/01/31/an-on-demand-slg-solver-for-chalk/ +[negative-reasoning-blog]: http://aturon.github.io/blog/2017/04/24/negative-chalk/ diff --git a/src/doc/rustc-guide/src/traits/specialization.md b/src/doc/rustc-guide/src/traits/specialization.md new file mode 100644 index 0000000000..671e5e0168 --- /dev/null +++ b/src/doc/rustc-guide/src/traits/specialization.md @@ -0,0 +1,42 @@ +# Specialization + +**TODO**: where does Chalk fit in? Should we mention/discuss it here? + +Defined in the `specialize` module. + +The basic strategy is to build up a *specialization graph* during +coherence checking (recall that coherence checking looks for overlapping +impls). Insertion into the graph locates the right place +to put an impl in the specialization hierarchy; if there is no right +place (due to partial overlap but no containment), you get an overlap +error. Specialization is consulted when selecting an impl (of course), +and the graph is consulted when propagating defaults down the +specialization hierarchy. + +You might expect that the specialization graph would be used during +selection – i.e. when actually performing specialization. This is +not done for two reasons: + +- It's merely an optimization: given a set of candidates that apply, + we can determine the most specialized one by comparing them directly + for specialization, rather than consulting the graph. Given that we + also cache the results of selection, the benefit of this + optimization is questionable. + +- To build the specialization graph in the first place, we need to use + selection (because we need to determine whether one impl specializes + another). Dealing with this reentrancy would require some additional + mode switch for selection. Given that there seems to be no strong + reason to use the graph anyway, we stick with a simpler approach in + selection, and use the graph only for propagating default + implementations. + +Trait impl selection can succeed even when multiple impls can apply, +as long as they are part of the same specialization family. In that +case, it returns a *single* impl on success – this is the most +specialized impl *known* to apply. However, if there are any inference +variables in play, the returned impl may not be the actual impl we +will use at trans time. Thus, we take special care to avoid projecting +associated types unless either (1) the associated type does not use +`default` and thus cannot be overridden or (2) all input types are +known concretely. 
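As a concrete illustration of a "specialization family", consider the classic
blanket-impl-plus-override pattern. This is a sketch that assumes a nightly
compiler, since the `specialization` feature is unstable (and still
incomplete); selection can succeed here even though both impls apply to
`String`, because one impl specializes the other.

```rust,ignore
#![feature(specialization)]

trait Describe {
    fn describe(&self) -> String;
}

// The general impl: applies to every `T`, and marks its method `default`
// so that more specialized impls may override it.
impl<T> Describe for T {
    default fn describe(&self) -> String {
        "something".to_string()
    }
}

// A more specialized impl in the same specialization family.
impl Describe for String {
    fn describe(&self) -> String {
        format!("the string {:?}", self)
    }
}

fn main() {
    // Both impls match `String`, but the most specialized one is chosen.
    println!("{}", "hello".to_string().describe());
}
```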
diff --git a/src/doc/rustc-guide/src/traits/wf.md b/src/doc/rustc-guide/src/traits/wf.md new file mode 100644 index 0000000000..f0cb03caab --- /dev/null +++ b/src/doc/rustc-guide/src/traits/wf.md @@ -0,0 +1,469 @@ +# Well-formedness checking + +WF checking has the job of checking that the various declarations in a Rust +program are well-formed. This is the basis for implied bounds, and partly for +that reason, this checking can be surprisingly subtle! For example, we +have to be sure that each impl proves the WF conditions declared on +the trait. + +For each declaration in a Rust program, we will generate a logical goal and try +to prove it using the lowered rules we described in the +[lowering rules](./lowering-rules.md) chapter. If we are able to prove it, we +say that the construct is well-formed. If not, we report an error to the user. + +Well-formedness checking happens in the [`src/rules/wf.rs`][wf] module in +chalk. After you have read this chapter, you may find useful to see an +extended set of examples in the [`src/rules/wf/test.rs`][wf_test] submodule. + +The new-style WF checking has not been implemented in rustc yet. + +[wf]: https://github.com/rust-lang-nursery/chalk/blob/master/src/rules/wf.rs +[wf_test]: https://github.com/rust-lang-nursery/chalk/blob/master/src/rules/wf/test.rs + +We give here a complete reference of the generated goals for each Rust +declaration. + +In addition to the notations introduced in the chapter about +lowering rules, we'll introduce another notation: when checking WF of a +declaration, we'll often have to prove that all types that appear are +well-formed, except type parameters that we always assume to be WF. Hence, +we'll use the following notation: for a type `SomeType<...>`, we define +`InputTypes(SomeType<...>)` to be the set of all non-parameter types appearing +in `SomeType<...>`, including `SomeType<...>` itself. + +Examples: +* `InputTypes((u32, f32)) = [u32, f32, (u32, f32)]` +* `InputTypes(Box) = [Box]` (assuming that `T` is a type parameter) +* `InputTypes(Box>) = [Box, Box>]` + +We also extend the `InputTypes` notation to where clauses in the natural way. +So, for example `InputTypes(A0: Trait)` is the union of +`InputTypes(A0)`, `InputTypes(A1)`, ..., `InputTypes(An)`. + +# Type definitions + +Given a general type definition: +```rust,ignore +struct Type where WC_type { + field1: A1, + ... + fieldn: An, +} +``` + +we generate the following goal, which represents its well-formedness condition: +```text +forall { + if (FromEnv(WC_type)) { + WellFormed(InputTypes(WC_type)) && + WellFormed(InputTypes(A1)) && + ... + WellFormed(InputTypes(An)) + } +} +``` + +which in English states: assuming that the where clauses defined on the type +hold, prove that every type appearing in the type definition is well-formed. + +Some examples: +```rust,ignore +struct OnlyClone where T: Clone { + clonable: T, +} +// The only types appearing are type parameters: we have nothing to check, +// the type definition is well-formed. +``` + +```rust,ignore +struct Foo where T: Clone { + foo: OnlyClone, +} +// The only non-parameter type which appears in this definition is +// `OnlyClone`. The generated goal is the following: +// ``` +// forall { +// if (FromEnv(T: Clone)) { +// WellFormed(OnlyClone) +// } +// } +// ``` +// which is provable. +``` + +```rust,ignore +struct Bar where ::Item: Debug { + bar: i32, +} +// The only non-parameter types which appear in this definition are +// `::Item` and `i32`. 
The generated goal is the following: +// ``` +// forall { +// if (FromEnv(::Item: Debug)) { +// WellFormed(::Item) && +// WellFormed(i32) +// } +// } +// ``` +// which is not provable since `WellFormed(::Item)` requires +// proving `Implemented(T: Iterator)`, and we are unable to prove that for an +// unknown `T`. +// +// Hence, this type definition is considered illegal. An additional +// `where T: Iterator` would make it legal. +``` + +# Trait definitions + +Given a general trait definition: +```rust,ignore +trait Trait where WC_trait { + type Assoc: Bounds_assoc where WC_assoc; +} +``` + +we generate the following goal: +```text +forall { + if (FromEnv(WC_trait)) { + WellFormed(InputTypes(WC_trait)) && + + forall { + if (FromEnv(WC_assoc)) { + WellFormed(InputTypes(Bounds_assoc)) && + WellFormed(InputTypes(WC_assoc)) + } + } + } +} +``` + +There is not much to verify in a trait definition. We just want +to prove that the types appearing in the trait definition are well-formed, +under the assumption that the different where clauses hold. + +Some examples: +```rust,ignore +trait Foo where T: Iterator, ::Item: Debug { + ... +} +// The only non-parameter type which appears in this definition is +// `::Item`. The generated goal is the following: +// ``` +// forall { +// if (FromEnv(T: Iterator), FromEnv(::Item: Debug)) { +// WellFormed(::Item) +// } +// } +// ``` +// which is provable thanks to the `FromEnv(T: Iterator)` assumption. +``` + +```rust,ignore +trait Bar { + type Assoc: From<::Item>; +} +// The only non-parameter type which appears in this definition is +// `::Item`. The generated goal is the following: +// ``` +// forall { +// WellFormed(::Item) +// } +// ``` +// which is not provable, hence the trait definition is considered illegal. +``` + +```rust,ignore +trait Baz { + type Assoc: From<::Item> where T: Iterator; +} +// The generated goal is now: +// ``` +// forall { +// if (FromEnv(T: Iterator)) { +// WellFormed(::Item) +// } +// } +// ``` +// which is now provable. +``` + +# Impls + +Now we give ourselves a general impl for the trait defined above: +```rust,ignore +impl Trait for SomeType where WC_impl { + type Assoc = SomeValue where WC_assoc; +} +``` + +Note that here, `WC_assoc` are the same where clauses as those defined on the +associated type definition in the trait declaration, *except* that type +parameters from the trait are substituted with values provided by the impl +(see example below). You cannot add new where clauses. You may omit to write +the where clauses if you want to emphasize the fact that you are actually not +relying on them. + +Some examples to illustrate that: +```rust,ignore +trait Foo { + type Assoc where T: Clone; +} + +struct OnlyClone { ... } + +impl Foo> for () { + // We substitute type parameters from the trait by the ones provided + // by the impl, that is instead of having a `T: Clone` where clause, + // we have an `Option: Clone` one. + type Assoc = OnlyClone> where Option: Clone; +} + +impl Foo for i32 { + // I'm not using the `T: Clone` where clause from the trait, so I can + // omit it. 
+ type Assoc = u32; +} + +impl Foo for f32 { + type Assoc = OnlyClone> where Option: Clone; + // ^^^^^^^^^^^^^^^^^^^^^^ + // this where clause does not exist + // on the original trait decl: illegal +} +``` + +> So in Rust, where clauses on associated types work *exactly* like where +> clauses on trait methods: in an impl, we must substitute the parameters from +> the traits with values provided by the impl, we may omit them if we don't +> need them, but we cannot add new where clauses. + +Now let's see the generated goal for this general impl: +```text +forall { + // Well-formedness of types appearing in the impl + if (FromEnv(WC_impl), FromEnv(InputTypes(SomeType: Trait))) { + WellFormed(InputTypes(WC_impl)) && + + forall { + if (FromEnv(WC_assoc)) { + WellFormed(InputTypes(SomeValue)) + } + } + } + + // Implied bounds checking + if (FromEnv(WC_impl), FromEnv(InputTypes(SomeType: Trait))) { + WellFormed(SomeType: Trait) && + + forall { + if (FromEnv(WC_assoc)) { + WellFormed(SomeValue: Bounds_assoc) + } + } + } +} +``` + +Here is the most complex goal. As always, first, assuming that +the various where clauses hold, we prove that every type appearing in the impl +is well-formed, ***except*** types appearing in the impl header +`SomeType: Trait`. Instead, we *assume* that those types are +well-formed +(hence the `if (FromEnv(InputTypes(SomeType: Trait)))` +conditions). This is +part of the implied bounds proposal, so that we can rely on the bounds +written on the definition of e.g. the `SomeType` type (and that we don't +need to repeat those bounds). +> Note that we don't need to check well-formedness of types appearing in +> `WC_assoc` because we already did that in the trait decl (they are just +> repeated with some substitutions of values which we already assume to be +> well-formed) + +Next, still assuming that the where clauses on the impl `WC_impl` hold and that +the input types of `SomeType` are well-formed, we prove that +`WellFormed(SomeType: Trait)` hold. That is, we want to prove +that `SomeType` verify all the where clauses that might transitively +be required by the `Trait` definition (see +[this subsection](./implied-bounds.md#co-inductiveness-of-wellformed)). + +Lastly, assuming in addition that the where clauses on the associated type +`WC_assoc` hold, +we prove that `WellFormed(SomeValue: Bounds_assoc)` hold. Again, we are +not only proving `Implemented(SomeValue: Bounds_assoc)`, but also +all the facts that might transitively come from `Bounds_assoc`. We must do this +because we allow the use of implied bounds on associated types: if we have +`FromEnv(SomeType: Trait)` in our environment, the lowering rules +chapter indicates that we are able to deduce +`FromEnv(::Assoc: Bounds_assoc)` without knowing what the +precise value of `::Assoc` is. + +Some examples for the generated goal: +```rust,ignore +// Trait Program Clauses + +// These are program clauses that come from the trait definitions below +// and that the trait solver can use for its reasonings. I'm just restating +// them here so that we have them in mind. + +trait Copy { } +// This is a program clause that comes from the trait definition above +// and that the trait solver can use for its reasonings. I'm just restating +// it here (and also the few other ones coming just after) so that we have +// them in mind. +// `WellFormed(Self: Copy) :- Implemented(Self: Copy).` + +trait Partial where Self: Copy { } +// ``` +// WellFormed(Self: Partial) :- +// Implemented(Self: Partial) && +// WellFormed(Self: Copy). 
+// ``` + +trait Complete where Self: Partial { } +// ``` +// WellFormed(Self: Complete) :- +// Implemented(Self: Complete) && +// WellFormed(Self: Partial). +// ``` + +// Impl WF Goals + +impl Partial for T where T: Complete { } +// The generated goal is: +// ``` +// forall { +// if (FromEnv(T: Complete)) { +// WellFormed(T: Partial) +// } +// } +// ``` +// Then proving `WellFormed(T: Partial)` amounts to proving +// `Implemented(T: Partial)` and `Implemented(T: Copy)`. +// Both those facts can be deduced from the `FromEnv(T: Complete)` in our +// environment: this impl is legal. + +impl Complete for T { } +// The generated goal is: +// ``` +// forall { +// WellFormed(T: Complete) +// } +// ``` +// Then proving `WellFormed(T: Complete)` amounts to proving +// `Implemented(T: Complete)`, `Implemented(T: Partial)` and +// `Implemented(T: Copy)`. +// +// `Implemented(T: Complete)` can be proved thanks to the +// `impl Complete for T` blanket impl. +// +// `Implemented(T: Partial)` can be proved thanks to the +// `impl Partial for T where T: Complete` impl and because we know +// `T: Complete` holds. + +// However, `Implemented(T: Copy)` cannot be proved: the impl is illegal. +// An additional `where T: Copy` bound would be sufficient to make that impl +// legal. +``` + +```rust,ignore +trait Bar { } + +impl Bar for T where ::Item: Bar { } +// We have a non-parameter type appearing in the where clauses: +// `::Item`. The generated goal is: +// ``` +// forall { +// if (FromEnv(::Item: Bar)) { +// WellFormed(T: Bar) && +// WellFormed(::Item: Bar) +// } +// } +// ``` +// And `WellFormed(::Item: Bar)` is not provable: we'd need +// an additional `where T: Iterator` for example. +``` + +```rust,ignore +trait Foo { } + +trait Bar { + type Item: Foo; +} + +struct Stuff { } + +impl Bar for Stuff where T: Foo { + type Item = T; +} +// The generated goal is: +// ``` +// forall { +// if (FromEnv(T: Foo)) { +// WellFormed(T: Foo). +// } +// } +// ``` +// which is provable. +``` + +```rust,ignore +trait Debug { ... } +// `WellFormed(Self: Debug) :- Implemented(Self: Debug).` + +struct Box { ... } +impl Debug for Box where T: Debug { ... } + +trait PointerFamily { + type Pointer: Debug where T: Debug; +} +// `WellFormed(Self: PointerFamily) :- Implemented(Self: PointerFamily).` + +struct BoxFamily; + +impl PointerFamily for BoxFamily { + type Pointer = Box where T: Debug; +} +// The generated goal is: +// ``` +// forall { +// WellFormed(BoxFamily: PointerFamily) && +// +// if (FromEnv(T: Debug)) { +// WellFormed(Box: Debug) && +// WellFormed(Box) +// } +// } +// ``` +// `WellFormed(BoxFamily: PointerFamily)` amounts to proving +// `Implemented(BoxFamily: PointerFamily)`, which is ok thanks to our impl. +// +// `WellFormed(Box)` is always true (there are no where clauses on the +// `Box` type definition). +// +// Moreover, we have an `impl Debug for Box`, hence +// we can prove `WellFormed(Box: Debug)` and the impl is indeed legal. +``` + +```rust,ignore +trait Foo { + type Assoc; +} + +struct OnlyClone { ... } + +impl Foo for i32 { + type Assoc = OnlyClone; +} +// The generated goal is: +// ``` +// forall { +// WellFormed(i32: Foo) && +// WellFormed(OnlyClone) +// } +// ``` +// however `WellFormed(OnlyClone)` is not provable because it requires +// `Implemented(T: Clone)`. It would be tempting to just add a `where T: Clone` +// bound inside the `impl Foo for i32` block, however we saw that it was +// illegal to add where clauses that didn't come from the trait definition. 
+``` diff --git a/src/doc/rustc-guide/src/ty.md b/src/doc/rustc-guide/src/ty.md new file mode 100644 index 0000000000..fea9afbeb0 --- /dev/null +++ b/src/doc/rustc-guide/src/ty.md @@ -0,0 +1,168 @@ +# The `ty` module: representing types + +The `ty` module defines how the Rust compiler represents types +internally. It also defines the *typing context* (`tcx` or `TyCtxt`), +which is the central data structure in the compiler. + +## The tcx and how it uses lifetimes + +The `tcx` ("typing context") is the central data structure in the +compiler. It is the context that you use to perform all manner of +queries. The struct `TyCtxt` defines a reference to this shared context: + +```rust,ignore +tcx: TyCtxt<'a, 'gcx, 'tcx> +// -- ---- ---- +// | | | +// | | innermost arena lifetime (if any) +// | "global arena" lifetime +// lifetime of this reference +``` + +As you can see, the `TyCtxt` type takes three lifetime parameters. +These lifetimes are perhaps the most complex thing to understand about +the tcx. During Rust compilation, we allocate most of our memory in +**arenas**, which are basically pools of memory that get freed all at +once. When you see a reference with a lifetime like `'tcx` or `'gcx`, +you know that it refers to arena-allocated data (or data that lives as +long as the arenas, anyhow). + +We use two distinct levels of arenas. The outer level is the "global +arena". This arena lasts for the entire compilation: so anything you +allocate in there is only freed once compilation is basically over +(actually, when we shift to executing LLVM). + +To reduce peak memory usage, when we do type inference, we also use an +inner level of arena. These arenas get thrown away once type inference +is over. This is done because type inference generates a lot of +"throw-away" types that are not particularly interesting after type +inference completes, so keeping around those allocations would be +wasteful. + +Often, we wish to write code that explicitly asserts that it is not +taking place during inference. In that case, there is no "local" +arena, and all the types that you can access are allocated in the +global arena. To express this, the idea is to use the same lifetime +for the `'gcx` and `'tcx` parameters of `TyCtxt`. Just to be a touch +confusing, we tend to use the name `'tcx` in such contexts. Here is an +example: + +```rust,ignore +fn not_in_inference<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) { + // ---- ---- + // Using the same lifetime here asserts + // that the innermost arena accessible through + // this reference *is* the global arena. +} +``` + +In contrast, if we want to code that can be usable during type inference, then +you need to declare a distinct `'gcx` and `'tcx` lifetime parameter: + +```rust,ignore +fn maybe_in_inference<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, def_id: DefId) { + // ---- ---- + // Using different lifetimes here means that + // the innermost arena *may* be distinct + // from the global arena (but doesn't have to be). +} +``` + +### Allocating and working with types + +Rust types are represented using the `Ty<'tcx>` defined in the `ty` +module (not to be confused with the `Ty` struct from [the HIR]). This +is in fact a simple type alias for a reference with `'tcx` lifetime: + +```rust,ignore +pub type Ty<'tcx> = &'tcx TyS<'tcx>; +``` + +[the HIR]: ./hir.html + +You can basically ignore the `TyS` struct – you will basically never +access it explicitly. 
We always pass it by reference using the +`Ty<'tcx>` alias – the only exception I think is to define inherent +methods on types. Instances of `TyS` are only ever allocated in one of +the rustc arenas (never e.g. on the stack). + +One common operation on types is to **match** and see what kinds of +types they are. This is done by doing `match ty.sty`, sort of like this: + +```rust,ignore +fn test_type<'tcx>(ty: Ty<'tcx>) { + match ty.sty { + ty::TyArray(elem_ty, len) => { ... } + ... + } +} +``` + +The `sty` field (the origin of this name is unclear to me; perhaps +structural type?) is of type `TyKind<'tcx>`, which is an enum +defining all of the different kinds of types in the compiler. + +> N.B. inspecting the `sty` field on types during type inference can be +> risky, as there may be inference variables and other things to +> consider, or sometimes types are not yet known that will become +> known later.). + +To allocate a new type, you can use the various `mk_` methods defined +on the `tcx`. These have names that correpond mostly to the various kinds +of type variants. For example: + +```rust,ignore +let array_ty = tcx.mk_array(elem_ty, len * 2); +``` + +These methods all return a `Ty<'tcx>` – note that the lifetime you +get back is the lifetime of the innermost arena that this `tcx` has +access to. In fact, types are always canonicalized and interned (so we +never allocate exactly the same type twice) and are always allocated +in the outermost arena where they can be (so, if they do not contain +any inference variables or other "temporary" types, they will be +allocated in the global arena). However, the lifetime `'tcx` is always +a safe approximation, so that is what you get back. + +> NB. Because types are interned, it is possible to compare them for +> equality efficiently using `==` – however, this is almost never what +> you want to do unless you happen to be hashing and looking for +> duplicates. This is because often in Rust there are multiple ways to +> represent the same type, particularly once inference is involved. If +> you are going to be testing for type equality, you probably need to +> start looking into the inference code to do it right. + +You can also find various common types in the `tcx` itself by accessing +`tcx.types.bool`, `tcx.types.char`, etc (see `CommonTypes` for more). + +### Beyond types: other kinds of arena-allocated data structures + +In addition to types, there are a number of other arena-allocated data +structures that you can allocate, and which are found in this +module. Here are a few examples: + +- `Substs`, allocated with `mk_substs` – this will intern a slice of types, + often used to specify the values to be substituted for generics + (e.g. `HashMap` would be represented as a slice + `&'tcx [tcx.types.i32, tcx.types.u32]`). +- `TraitRef`, typically passed by value – a **trait reference** + consists of a reference to a trait along with its various type + parameters (including `Self`), like `i32: Display` (here, the def-id + would reference the `Display` trait, and the substs would contain + `i32`). +- `Predicate` defines something the trait system has to prove (see `traits` + module). + +### Import conventions + +Although there is no hard and fast rule, the `ty` module tends to be used like +so: + +```rust,ignore +use ty::{self, Ty, TyCtxt}; +``` + +In particular, since they are so common, the `Ty` and `TyCtxt` types +are imported directly. Other types are often referenced with an +explicit `ty::` prefix (e.g. `ty::TraitRef<'tcx>`). 
But some modules +choose to import a larger or smaller set of names explicitly. diff --git a/src/doc/rustc-guide/src/type-checking.md b/src/doc/rustc-guide/src/type-checking.md new file mode 100644 index 0000000000..d4f4bf1107 --- /dev/null +++ b/src/doc/rustc-guide/src/type-checking.md @@ -0,0 +1,44 @@ +# Type checking + +The [`rustc_typeck`][typeck] crate contains the source for "type collection" +and "type checking", as well as a few other bits of related functionality. (It +draws heavily on the [type inference] and [trait solving].) + +[typeck]: https://github.com/rust-lang/rust/tree/master/src/librustc_typeck +[type inference]: type-inference.html +[trait solving]: traits/resolution.html + +## Type collection + +Type "collection" is the process of converting the types found in the HIR +(`hir::Ty`), which represent the syntactic things that the user wrote, into the +**internal representation** used by the compiler (`Ty<'tcx>`) – we also do +similar conversions for where-clauses and other bits of the function signature. + +To try and get a sense for the difference, consider this function: + +```rust,ignore +struct Foo { } +fn foo(x: Foo, y: self::Foo) { ... } +// ^^^ ^^^^^^^^^ +``` + +Those two parameters `x` and `y` each have the same type: but they will have +distinct `hir::Ty` nodes. Those nodes will have different spans, and of course +they encode the path somewhat differently. But once they are "collected" into +`Ty<'tcx>` nodes, they will be represented by the exact same internal type. + +Collection is defined as a bundle of [queries] for computing information about +the various functions, traits, and other items in the crate being compiled. +Note that each of these queries is concerned with *interprocedural* things – +for example, for a function definition, collection will figure out the type and +signature of the function, but it will not visit the *body* of the function in +any way, nor examine type annotations on local variables (that's the job of +type *checking*). + +For more details, see the [`collect`][collect] module. + +[queries]: query.html +[collect]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_typeck/collect/ + +**TODO**: actually talk about type checking... diff --git a/src/doc/rustc-guide/src/type-inference.md b/src/doc/rustc-guide/src/type-inference.md new file mode 100644 index 0000000000..a3d012b5df --- /dev/null +++ b/src/doc/rustc-guide/src/type-inference.md @@ -0,0 +1,253 @@ +# Type inference + +Type inference is the process of automatic detection of the type of an +expression. + +It is what allows Rust to work with fewer or no type annotations, +making things easier for users: + +```rust,ignore +fn main() { + let mut things = vec![]; + things.push("thing") +} +``` + +Here, the type of `things` is *inferenced* to be `&str` because that's the value +we push into `things`. + +The type inference is based on the standard Hindley-Milner (HM) type inference +algorithm, but extended in various way to accommodate subtyping, region +inference, and higher-ranked types. + +## A note on terminology + +We use the notation `?T` to refer to inference variables, also called +existential variables. + +We use the terms "region" and "lifetime" interchangeably. Both refer to +the `'a` in `&'a T`. + +The term "bound region" refers to a region that is bound in a function +signature, such as the `'a` in `for<'a> fn(&'a u32)`. A region is +"free" if it is not bound. 
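A small illustrative snippet (not from the original text) showing the
difference between a bound and a free region:

```rust
// `'a` here is *bound* by the `for<'a>` binder: the function pointer type
// itself quantifies over the lifetime.
type Callback = for<'a> fn(&'a u32) -> u32;

// Inside the body of `deref_it`, `'b` is *free*: it was bound by the
// enclosing function signature, so from the body's point of view it is
// just some particular (unknown) region.
fn deref_it<'b>(x: &'b u32) -> u32 {
    *x
}

fn main() {
    // A function that is generic over its lifetime coerces to the
    // higher-ranked `Callback` type.
    let cb: Callback = deref_it;
    println!("{}", cb(&22));
}
```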
+ +## Creating an inference context + +You create and "enter" an inference context by doing something like +the following: + +```rust,ignore +tcx.infer_ctxt().enter(|infcx| { + // Use the inference context `infcx` here. +}) +``` + +Each inference context creates a short-lived type arena to store the +fresh types and things that it will create, as described in the +[chapter on the `ty` module][ty-ch]. This arena is created by the `enter` +function and disposed of after it returns. + +[ty-ch]: ty.html + +Within the closure, `infcx` has the type `InferCtxt<'cx, 'gcx, 'tcx>` +for some fresh `'cx` and `'tcx` – the latter corresponds to the lifetime of +this temporary arena, and the `'cx` is the lifetime of the `InferCtxt` itself. +(Again, see the [`ty` chapter][ty-ch] for more details on this setup.) + +The `tcx.infer_ctxt` method actually returns a builder, which means +there are some kinds of configuration you can do before the `infcx` is +created. See `InferCtxtBuilder` for more information. + + + +## Inference variables + +The main purpose of the inference context is to house a bunch of +**inference variables** – these represent types or regions whose precise +value is not yet known, but will be uncovered as we perform type-checking. + +If you're familiar with the basic ideas of unification from H-M type +systems, or logic languages like Prolog, this is the same concept. If +you're not, you might want to read a tutorial on how H-M type +inference works, or perhaps this blog post on +[unification in the Chalk project]. + +[Unification in the Chalk project]: http://smallcultfollowing.com/babysteps/blog/2017/03/25/unification-in-chalk-part-1/ + +All told, the inference context stores four kinds of inference variables as of +this writing: + +- Type variables, which come in three varieties: + - General type variables (the most common). These can be unified with any + type. + - Integral type variables, which can only be unified with an integral type, + and arise from an integer literal expression like `22`. + - Float type variables, which can only be unified with a float type, and + arise from a float literal expression like `22.0`. +- Region variables, which represent lifetimes, and arise all over the place. + +All the type variables work in much the same way: you can create a new +type variable, and what you get is `Ty<'tcx>` representing an +unresolved type `?T`. Then later you can apply the various operations +that the inferencer supports, such as equality or subtyping, and it +will possibly **instantiate** (or **bind**) that `?T` to a specific +value as a result. + +The region variables work somewhat differently, and are described +below in a separate section. + +## Enforcing equality / subtyping + +The most basic operations you can perform in the type inferencer is +**equality**, which forces two types `T` and `U` to be the same. The +recommended way to add an equality constraint is to use the `at` +method, roughly like so: + +```rust,ignore +infcx.at(...).eq(t, u); +``` + +The first `at()` call provides a bit of context, i.e. why you are +doing this unification, and in what environment, and the `eq` method +performs the actual equality constraint. + +When you equate things, you force them to be precisely equal. Equating +returns an `InferResult` – if it returns `Err(err)`, then equating +failed, and the enclosing `TypeError` will tell you what went wrong. + +The success case is perhaps more interesting. 
The "primary" return +type of `eq` is `()` – that is, when it succeeds, it doesn't return a +value of any particular interest. Rather, it is executed for its +side-effects of constraining type variables and so forth. However, the +actual return type is not `()`, but rather `InferOk<()>`. The +`InferOk` type is used to carry extra trait obligations – your job is +to ensure that these are fulfilled (typically by enrolling them in a +fulfillment context). See the [trait chapter] for more background on that. + +[trait chapter]: traits/resolution.html + +You can similarly enforce subtyping through `infcx.at(..).sub(..)`. The same +basic concepts as above apply. + +## "Trying" equality + +Sometimes you would like to know if it is *possible* to equate two +types without error. You can test that with `infcx.can_eq` (or +`infcx.can_sub` for subtyping). If this returns `Ok`, then equality +is possible – but in all cases, any side-effects are reversed. + +Be aware, though, that the success or failure of these methods is always +**modulo regions**. That is, two types `&'a u32` and `&'b u32` will +return `Ok` for `can_eq`, even if `'a != 'b`. This falls out from the +"two-phase" nature of how we solve region constraints. + +## Snapshots + +As described in the previous section on `can_eq`, often it is useful +to be able to do a series of operations and then roll back their +side-effects. This is done for various reasons: one of them is to be +able to backtrack, trying out multiple possibilities before settling +on which path to take. Another is in order to ensure that a series of +smaller changes take place atomically or not at all. + +To allow for this, the inference context supports a `snapshot` method. +When you call it, it will start recording changes that occur from the +operations you perform. When you are done, you can either invoke +`rollback_to`, which will undo those changes, or else `confirm`, which +will make the permanent. Snapshots can be nested as long as you follow +a stack-like discipline. + +Rather than use snapshots directly, it is often helpful to use the +methods like `commit_if_ok` or `probe` that encapsulate higher-level +patterns. + +## Subtyping obligations + +One thing worth discussing is subtyping obligations. When you force +two types to be a subtype, like `?T <: i32`, we can often convert those +into equality constraints. This follows from Rust's rather limited notion +of subtyping: so, in the above case, `?T <: i32` is equivalent to `?T = i32`. + +However, in some cases we have to be more careful. For example, when +regions are involved. So if you have `?T <: &'a i32`, what we would do +is to first "generalize" `&'a i32` into a type with a region variable: +`&'?b i32`, and then unify `?T` with that (`?T = &'?b i32`). We then +relate this new variable with the original bound: + +```text +&'?b i32 <: &'a i32 +``` + +This will result in a region constraint (see below) of `'?b: 'a`. + +One final interesting case is relating two unbound type variables, +like `?T <: ?U`. In that case, we can't make progress, so we enqueue +an obligation `Subtype(?T, ?U)` and return it via the `InferOk` +mechanism. You'll have to try again when more details about `?T` or +`?U` are known. + +## Region constraints + +Regions are inferenced somewhat differently from types. Rather than +eagerly unifying things, we simply collect constraints as we go, but +make (almost) no attempt to solve regions. 
These constraints have the +form of an "outlives" constraint: + +```text +'a: 'b +``` + +Actually the code tends to view them as a subregion relation, but it's the same +idea: + +```text +'b <= 'a +``` + +(There are various other kinds of constraints, such as "verifys"; see +the `region_constraints` module for details.) + +There is one case where we do some amount of eager unification. If you have an +equality constraint between two regions + +```text +'a = 'b +``` + +we will record that fact in a unification table. You can then use +`opportunistic_resolve_var` to convert `'b` to `'a` (or vice +versa). This is sometimes needed to ensure termination of fixed-point +algorithms. + +## Extracting region constraints + +Ultimately, region constraints are only solved at the very end of +type-checking, once all other constraints are known. There are two +ways to solve region constraints right now: lexical and +non-lexical. Eventually there will only be one. + +To solve **lexical** region constraints, you invoke +`resolve_regions_and_report_errors`. This "closes" the region +constraint process and invoke the `lexical_region_resolve` code. Once +this is done, any further attempt to equate or create a subtyping +relationship will yield an ICE. + +Non-lexical region constraints are not handled within the inference +context. Instead, the NLL solver (actually, the MIR type-checker) +invokes `take_and_reset_region_constraints` periodically. This +extracts all of the outlives constraints from the region solver, but +leaves the set of variables intact. This is used to get *just* the +region constraints that resulted from some particular point in the +program, since the NLL solver needs to know not just *what* regions +were subregions but *where*. Finally, the NLL solver invokes +`take_region_var_origins`, which "closes" the region constraint +process in the same way as normal solving. + +## Lexical region resolution + +Lexical region resolution is done by initially assigning each region +variable to an empty value. We then process each outlives constraint +repeatedly, growing region variables until a fixed-point is reached. +Region variables can be grown using a least-upper-bound relation on +the region lattice in a fairly straightforward fashion. diff --git a/src/doc/rustc-guide/src/variance.md b/src/doc/rustc-guide/src/variance.md new file mode 100644 index 0000000000..9fe98b4a14 --- /dev/null +++ b/src/doc/rustc-guide/src/variance.md @@ -0,0 +1,316 @@ +# Variance of type and lifetime parameters + +For a more general background on variance, see the [background] appendix. + +[background]: ./appendix/background.html + +During type checking we must infer the variance of type and lifetime +parameters. The algorithm is taken from Section 4 of the paper ["Taming the +Wildcards: Combining Definition- and Use-Site Variance"][pldi11] published in +PLDI'11 and written by Altidor et al., and hereafter referred to as The Paper. + +[pldi11]: https://people.cs.umass.edu/~yannis/variance-extended2011.pdf + +This inference is explicitly designed *not* to consider the uses of +types within code. To determine the variance of type parameters +defined on type `X`, we only consider the definition of the type `X` +and the definitions of any types it references. + +We only infer variance for type parameters found on *data types* +like structs and enums. In these cases, there is a fairly straightforward +explanation for what variance means. 
+More formally, the variance of the type or lifetime parameters defines
+whether `T<A>` is a subtype of `T<B>` (resp. `T<'a>` and `T<'b>`) based on
+the relationship of `A` and `B` (resp. `'a` and `'b`).
+
+We do not infer variance for type parameters found on traits, functions,
+or impls. Variance on trait parameters can indeed make sense
+(and we used to compute it) but it is actually rather subtle in
+meaning and not that useful in practice, so we removed it. See the
+[addendum] for some details. Variances on function/impl parameters, on the
+other hand, don't make sense because these parameters are instantiated and
+then forgotten; they don't persist in types or compiled byproducts.
+
+[addendum]: #addendum
+
+> **Notation**
+>
+> We use the notation of The Paper throughout this chapter:
+>
+> - `+` is _covariance_.
+> - `-` is _contravariance_.
+> - `*` is _bivariance_.
+> - `o` is _invariance_.
+
+## The algorithm
+
+The basic idea is quite straightforward. We iterate over the types
+defined and, for each use of a type parameter `X`, accumulate a
+constraint indicating that the variance of `X` must be valid for the
+variance of that use site. We then iteratively refine the variance of
+`X` until all constraints are met. There is *always* a solution, because at
+the limit we can declare all type parameters to be invariant and all
+constraints will be satisfied.
+
+As a simple example, consider:
+
+```rust,ignore
+enum Option<A> { Some(A), None }
+enum OptionalFn<B> { Some(|B|), None }
+enum OptionalMap<C> { Some(|C| -> C), None }
+```
+
+Here, we will generate the constraints:
+
+```text
+1. V(A) <= +
+2. V(B) <= -
+3. V(C) <= +
+4. V(C) <= -
+```
+
+These indicate that (1) the variance of A must be at most covariant;
+(2) the variance of B must be at most contravariant; and (3, 4) the
+variance of C must be at most covariant *and* contravariant. All of these
+results are based on a variance lattice defined as follows:
+
+```text
+   *      Top (bivariant)
+-     +
+   o      Bottom (invariant)
+```
+
+Based on this lattice, the solution `V(A)=+`, `V(B)=-`, `V(C)=o` is the
+optimal solution. Note that there is always a naive solution which
+just declares all variables to be invariant.
+
+You may be wondering why fixed-point iteration is required. The reason
+is that the variance of a use site may itself be a function of the
+variance of other type parameters. In full generality, our constraints
+take the form:
+
+```text
+V(X) <= Term
+Term := + | - | * | o | V(X) | Term x Term
+```
+
+Here the notation `V(X)` indicates the variance of a type/region
+parameter `X` with respect to its defining class. `Term x Term`
+represents the "variance transform" as defined in the paper:
+
+> If the variance of a type variable `X` in type expression `E` is `V2`
+  and the definition-site variance of the [corresponding] type parameter
+  of a class `C` is `V1`, then the variance of `X` in the type expression
+  `C<E>` is `V3 = V1.xform(V2)`.
+
+## Constraints
+
+If I have a struct or enum with where clauses:
+
+```rust,ignore
+struct Foo<T: Bar> { ... }
+```
+
+you might wonder whether the variance of `T` with respect to `Bar` affects the
+variance of `T` with respect to `Foo`. I claim no. The reason: assume that `T`
+is invariant with respect to `Bar` but covariant with respect to `Foo`. And
+then we have a `Foo<X>` that is upcast to `Foo<Y>`, where `X <: Y`. However,
+while `X : Bar` holds, `Y : Bar` does not. In that case, the upcast will be
+illegal, but not because of a variance failure; rather, the target type
+`Foo<Y>` is itself just not well-formed. Basically, we get to assume
+well-formedness of all types involved before considering variance.
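+
+The fixed-point computation above is small enough to sketch in ordinary
+Rust. The following is a self-contained toy solver for the
+`Option`/`OptionalFn`/`OptionalMap` constraints; it is an editorial sketch,
+not the compiler's actual code (names like `meet` are invented here), and it
+omits the `xform` transform needed for nested uses:
+
+```rust
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+enum Variance { Bivariant, Covariant, Contravariant, Invariant }
+use self::Variance::*;
+
+impl Variance {
+    /// Greatest lower bound on the lattice pictured above: `*` on top,
+    /// `o` at the bottom, `+` and `-` incomparable in the middle.
+    fn meet(self, other: Variance) -> Variance {
+        match (self, other) {
+            (Bivariant, v) | (v, Bivariant) => v,
+            (a, b) if a == b => a,
+            _ => Invariant,
+        }
+    }
+}
+
+fn main() {
+    // One slot per parameter: V(A), V(B), V(C); everything starts at `*`.
+    let mut v = [Bivariant; 3];
+    // Constraints 1-4 from the example above.
+    let constraints: [(usize, Variance); 4] =
+        [(0, Covariant), (1, Contravariant), (2, Covariant), (2, Contravariant)];
+
+    // Iteratively refine until nothing changes (one pass suffices here,
+    // since no constraint mentions another variable's variance).
+    loop {
+        let mut changed = false;
+        for &(idx, term) in &constraints {
+            let new = v[idx].meet(term);
+            if new != v[idx] {
+                v[idx] = new;
+                changed = true;
+            }
+        }
+        if !changed { break; }
+    }
+
+    assert_eq!(v, [Covariant, Contravariant, Invariant]);
+    println!("{:?}", v); // the optimal solution: V(A)=+, V(B)=-, V(C)=o
+}
+```
+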
+### Dependency graph management
+
+Because variance is a whole-crate inference, its dependency graph
+can become quite muddled if we are not careful. To resolve this, we refactor
+into two queries:
+
+- `crate_variances` computes the variance for all items in the current crate.
+- `variances_of` accesses the variance for an individual item; it
+  works by requesting `crate_variances` and extracting the relevant data.
+
+If you limit yourself to reading `variances_of`, your code will then
+depend only on the inference of that particular item.
+
+Ultimately, this setup relies on the [red-green algorithm][rga]. In particular,
+every variance query effectively depends on all type definitions in the entire
+crate (through `crate_variances`), but since most changes will not result in a
+change to the actual results from variance inference, the `variances_of` query
+will wind up being considered green after it is re-evaluated.
+
+[rga]: ./incremental-compilation.html
+
+<a name="addendum"></a>
+
+## Addendum: Variance on traits
+
+As mentioned above, we used to permit variance on traits. This was
+computed based on the appearance of trait type parameters in
+method signatures and was used to represent the compatibility of
+vtables in trait objects (and also "virtual" vtables or dictionaries
+in trait bounds). One complication was that variance for
+associated types is less obvious, since they can be projected out
+and put to myriad uses, so it's not clear when it is safe to allow
+`X::Bar` to vary (or indeed just what that means). Moreover (as
+covered below) all inputs on any trait with an associated type had
+to be invariant, limiting the applicability. Finally, the
+annotations (`MarkerTrait`, `PhantomFn`) needed to ensure that all
+trait type parameters had a variance were confusing and annoying
+for little benefit.
+
+Just for historical reference, I am going to preserve some text indicating how
+one could interpret variance and trait matching.
+
+### Variance and object types
+
+Just as with structs and enums, we can decide the subtyping
+relationship between two object types `&Trait<A>` and `&Trait<B>`
+based on the relationship of `A` and `B`. Note that for object
+types we ignore the `Self` type parameter – it is unknown, and
+the nature of dynamic dispatch ensures that we will always call a
+function that expects the appropriate `Self` type. However, we
+must be careful with the other type parameters, or else we could
+end up calling a function that is expecting one type but provided
+another.
+
+To see what I mean, consider a trait like so:
+
+```rust
+trait ConvertTo<A> {
+    fn convertTo(&self) -> A;
+}
+```
+
+Intuitively, if we had one object `O=&ConvertTo<Object>` and another
+`S=&ConvertTo<String>`, then `S <: O` because `String <: Object`
+(presuming Java-like "string" and "object" types, my go-to examples
+for subtyping). The actual algorithm would be to compare the
+(explicit) type parameters pairwise, respecting their variance: here,
+the type parameter `A` is covariant (it appears only in a return
+position), and hence we require that `String <: Object`.
+
+You'll note though that we did not consider the binding for the
+(implicit) `Self` type parameter: in fact, it is unknown, so that's
+good. The reason we can ignore that parameter is precisely because we
+don't need to know its value until a call occurs, and at that time the
+dynamic nature of virtual dispatch means the code we run
+will be correct for whatever value `Self` happens to be bound to for
+the particular object whose method we called. `Self` is thus different
+from `A`, because the caller requires that `A` be known in order to
+know the return type of the method `convertTo()`. (As an aside, we
+have rules preventing methods where `Self` appears outside of the
+receiver position from being called via an object.)
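+
+The claim that a parameter appearing only in return position is covariant
+can be checked directly in today's Rust using function pointers and
+lifetimes, where real subtyping exists (a hedged, editorial illustration;
+the function names are made up for this sketch):
+
+```rust
+fn gives_static() -> &'static u32 {
+    &42 // promoted to a `'static` constant
+}
+
+// Expects a producer of `&'a u32` for some caller-chosen lifetime `'a`.
+fn wants_producer<'a>(f: fn() -> &'a u32) -> &'a u32 {
+    f()
+}
+
+fn main() {
+    // `fn() -> &'static u32` is a subtype of `fn() -> &'a u32` because
+    // the return position is covariant, so `gives_static` is accepted.
+    println!("{}", wants_producer(gives_static));
+}
+```
+
+Had the parameter appeared in argument position instead, the direction
+would flip (contravariance), which is exactly the role `Self` plays below.
+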
+### Trait variance and vtable resolution
+
+But traits aren't only used with objects. They're also used when
+deciding whether a given impl satisfies a given trait bound. To set the
+scene here, imagine I had a function:
+
+```rust,ignore
+fn convertAll<A, T: ConvertTo<A>>(v: &[T]) { ... }
+```
+
+Now imagine that I have an implementation of `ConvertTo<i32>` for `Object`:
+
+```rust,ignore
+impl ConvertTo<i32> for Object { ... }
+```
+
+And I want to call `convertAll` on an array of strings. Suppose
+further that for whatever reason I specifically supply the value of
+`String` for the type parameter `T`:
+
+```rust,ignore
+let mut vector = vec!["string", ...];
+convertAll::<i32, String>(vector);
+```
+
+Is this legal? To put it another way, can we apply the `impl` for
+`Object` to the type `String`? The answer is yes, but to see why
+we have to expand out what will happen:
+
+- `convertAll` will create a pointer to one of the entries in the
+  vector, which will have type `&String`
+- It will then call the impl of `convertTo()` that is intended
+  for use with objects. This has the type `fn(self: &Object) -> i32`.
+
+  It is OK to provide a value for `self` of type `&String` because
+  `&String <: &Object`.
+
+OK, so intuitively we want this to be legal, so let's bring this back
+to variance and see whether we are computing the correct result. We
+must first figure out how to phrase the question "is an impl for
+`Object,i32` usable where an impl for `String,i32` is expected?"
+
+Maybe it's helpful to think of a dictionary-passing implementation of
+type classes. In that case, `convertAll()` takes an implicit parameter
+representing the impl. In short, we *have* an impl of type:
+
+```text
+V_O = ConvertTo<i32> for Object
+```
+
+and the function prototype expects an impl of type:
+
+```text
+V_S = ConvertTo<i32> for String
+```
+
+As with any argument, this is legal if the type of the value given
+(`V_O`) is a subtype of the type expected (`V_S`). So is `V_O <: V_S`?
+The answer will depend on the variance of the various parameters. In
+this case, because the `Self` parameter is contravariant and `A` is
+covariant, it means that:
+
+```text
+V_O <: V_S iff
+    i32 <: i32
+    String <: Object
+```
+
+These conditions are satisfied and so we are happy.
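+
+The same dictionary-passing check can be replayed in today's Rust by
+substituting lifetimes for the Java-like `String <: Object` relationship
+and letting a plain function pointer stand in for the impl's method table.
+This is an editorial sketch; it is not code the compiler generates:
+
+```rust
+// The general impl, playing the role of `V_O`: it accepts a `&str` of
+// *any* lifetime.
+fn convert_any(s: &str) -> i32 {
+    s.len() as i32
+}
+
+// A caller type-checked against the more specific dictionary
+// `fn(&'static str) -> i32`, playing the role of `V_S`.
+fn use_static_dict(dict: fn(&'static str) -> i32) -> i32 {
+    dict("hello")
+}
+
+fn main() {
+    // `for<'a> fn(&'a str) -> i32` is a subtype of
+    // `fn(&'static str) -> i32`: the argument (the `Self` side) is
+    // contravariant and the result is covariant, so the general
+    // dictionary is accepted where the specific one is expected,
+    // which is the same shape as the `V_O <: V_S` check above.
+    println!("{}", use_static_dict(convert_any));
+}
+```
+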
+### Variance and associated types
+
+Traits with associated types – or at minimum projection
+expressions – must be invariant with respect to all of their
+inputs. To see why this makes sense, consider what subtyping for a
+trait reference means:
+
+```text
+<T as Trait> <: <U as Trait>
+```
+
+means that if I know that `T as Trait`, I also know that `U as
+Trait`. Moreover, if you think of it as dictionary passing style,
+it means that a dictionary for `<T as Trait>` is safe to use where
+a dictionary for `<U as Trait>` is expected.
+
+The problem is that when you can project types out from `<T as
+Trait>`, the relationship to types projected out of `<U as Trait>`
+is completely unknown unless `T==U` (see #21726 for more
+details). Making `Trait` invariant ensures that this is true.
+
+Another related reason is that if we didn't make traits with
+associated types invariant, then projection would no longer be a
+function with a single result. Consider:
+
+```rust,ignore
+trait Identity { type Out; fn foo(&self); }
+impl<T> Identity for T { type Out = T; ... }
+```
+
+Now if I have `<&'static () as Identity>::Out`, this can be
+validly derived as `&'a ()` for any `'a`:
+
+```text
+<&'a () as Identity> <: <&'static () as Identity>
+if &'static () <: &'a ()   -- Identity is contravariant in Self
+if 'static : 'a            -- Subtyping rules for relations
+```
+
+Making the trait invariant, on the other hand, means that
+`<&'static () as Identity>::Out` is always `&'static ()` (which might
+then be upcast to `&'a ()`, separately). This was helpful in solving #21750.
diff --git a/src/doc/rustc-guide/src/walkthrough.md b/src/doc/rustc-guide/src/walkthrough.md
new file mode 100644
index 0000000000..e77a0f6906
--- /dev/null
+++ b/src/doc/rustc-guide/src/walkthrough.md
@@ -0,0 +1,269 @@
+# Walkthrough: a typical contribution
+
+There are _a lot_ of ways to contribute to the rust compiler, including fixing
+bugs, improving performance, helping design features, providing feedback on
+existing features, etc. This chapter does not try to cover all of them.
+Instead, it walks through the design and implementation of a new feature. Not
+all of the steps and processes described here are needed for every
+contribution, and I will try to point those out as they arise.
+
+In general, if you are interested in making a contribution and aren't sure
+where to start, please feel free to ask!
+
+## Overview
+
+The feature I will discuss in this chapter is the `?` Kleene operator for
+macros. Basically, we want to be able to write something like this:
+
+```rust,ignore
+macro_rules! foo {
+    ($arg:ident $(, $optional_arg:ident)?) => {
+        println!("{}", $arg);
+
+        $(
+            println!("{}", $optional_arg);
+        )?
+    }
+}
+
+fn main() {
+    let x = 0;
+    foo!(x); // ok! prints "0"
+    foo!(x, x); // ok! prints "0 0"
+}
+```
+
+So basically, the `$(pat)?` matcher in the macro means "this pattern can occur
+0 or 1 times", similar to other regex syntaxes.
+
+There were a number of steps to go from an idea to a stable rust feature. Here
+is a quick list. We will go through each of these in order below (a short
+usage example of the stabilized feature follows the list). As I mentioned
+before, not all of these are needed for every type of contribution.
+
+- **Idea discussion/Pre-RFC** A Pre-RFC is an early draft or design discussion
+  of a feature. This stage is intended to flesh out the design space a bit and
+  get a grasp on the different merits and problems with an idea. It's a great
+  way to get early feedback on your idea before presenting it to the wider
+  audience. You can find the original discussion [here][prerfc].
+- **RFC** This is when you formally present your idea to the community for
+  consideration. You can find the RFC [here][rfc].
+- **Implementation** Implement your idea unstably in the compiler. You can
+  find the original implementation [here][impl1].
+- **Possibly iterate/refine** As the community gets experience with your
+  feature on the nightly compiler and in `libstd`, there may be additional
+  feedback about design choices that might be adjusted. This particular
+  feature went [through][impl2] a [number][impl3] of [iterations][impl4].
+- **Stabilization** When your feature has baked enough, a rust team member may
+  [propose to stabilize it][merge]. If there is consensus, this is done.
+- **Relax** Your feature is now a stable rust feature!
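+
+As promised above, here is what the stabilized feature looks like in everyday
+use. A common pattern is accepting an optional trailing comma; this particular
+snippet is an editorial example rather than one taken from the RFC:
+
+```rust
+macro_rules! point {
+    ($x:expr, $y:expr $(,)?) => {
+        ($x, $y)
+    };
+}
+
+fn main() {
+    assert_eq!(point!(1, 2), (1, 2));
+    assert_eq!(point!(1, 2,), (1, 2)); // the trailing comma matches `$(,)?`
+}
+```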
+ +[prerfc]: https://internals.rust-lang.org/t/pre-rfc-at-most-one-repetition-macro-patterns/6557 +[rfc]: https://github.com/rust-lang/rfcs/pull/2298 +[impl1]: https://github.com/rust-lang/rust/pull/47752 +[impl2]: https://github.com/rust-lang/rust/pull/49719 +[impl3]: https://github.com/rust-lang/rust/pull/51336 +[impl4]: https://github.com/rust-lang/rust/pull/51587 +[merge]: https://github.com/rust-lang/rust/issues/48075#issuecomment-433177613 + +## Pre-RFC and RFC + +> NOTE: In general, if you are not proposing a _new_ feature or substantial +> change to rust or the ecosystem, you don't need to follow the RFC process. +> Instead, you can just jump to [implementation](#impl). +> +> You can find the official guidelines for when to open an RFC [here][rfcwhen]. + +[rfcwhen]: https://github.com/rust-lang/rfcs#when-you-need-to-follow-this-process + +An RFC is a document that describes the feature or change you are proposing in +detail. Anyone can write an RFC; the process is the same for everyone, +including rust team members. + +To open an RFC, open a PR on the +[rust-lang/rfcs](https://github.com/rust-lang/rfcs) repo on GitHub. You can +find detailed instructions in the +[README](https://github.com/rust-lang/rfcs#what-the-process-is). + +Before opening an RFC, you should do the research to "flesh out" your idea. +Hastily-proposed RFCs tend not to be accepted. You should generally have a good +description of the motivation, impact, disadvantages, and potential +interactions with other features. + +If that sounds like a lot of work, it's because it is. But no fear! Even if +you're not a compiler hacker, you can get great feedback by doing a _pre-RFC_. +This is an _informal_ discussion of the idea. The best place to do this is +internals.rust-lang.org. Your post doesn't have to follow any particular +structure. It doesn't even need to be a cohesive idea. Generally, you will get +tons of feedback that you can integrate back to produce a good RFC. + +(Another pro-tip: try searching the RFCs repo and internals for prior related +ideas. A lot of times an idea has already been considered and was either +rejected or postponed to be tried again later. This can save you and everybody +else some time) + +In the case of our example, a participant in the pre-RFC thread pointed out a +syntax ambiguity and a potential resolution. Also, the overall feedback seemed +positive. In this case, the discussion converged pretty quickly, but for some +ideas, a lot more discussion can happen (e.g. see [this RFC][nonascii] which +received a whopping 684 comments!). If that happens, don't be discouraged; it +means the community is interested in your idea, but it perhaps needs some +adjustments. + +[nonascii]: https://github.com/rust-lang/rfcs/pull/2457 + +The RFC for our `?` macro feature did receive some discussion on the RFC thread +too. As with most RFCs, there were a few questions that we couldn't answer by +discussion: we needed experience using the feature to decide. Such questions +are listed in the "Unresolved Questions" section of the RFC. Also, over the +course of the RFC discussion, you will probably want to update the RFC document +itself to reflect the course of the discussion (e.g. new alternatives or prior +work may be added or you may decide to change parts of the proposal itself). + +In the end, when the discussion seems to reach a consensus and die down a bit, +a rust team member may propose to move to FCP with one of three possible dispositions. 
+This means that they want the other members of the appropriate teams to review
+and comment on the RFC. More discussion may ensue, which may result in more
+changes or unresolved questions being added. At some point, when everyone is
+satisfied, the RFC enters the "final comment period" (FCP), which is the last
+chance for people to bring up objections. When the FCP is over, the disposition
+is adopted. Here are the three possible dispositions:
+
+- _Merge_: accept the feature. Here is the proposal to merge for our [`?` macro
+  feature][rfcmerge].
+- _Close_: this feature in its current form is not a good fit for rust. Don't
+  be discouraged if this happens to your RFC, and don't take it personally.
+  This is not a reflection on you, but rather a community decision that rust
+  will go a different direction.
+- _Postpone_: there is interest in going this direction but not at the moment.
+  This happens most often because the appropriate rust team doesn't have the
+  bandwidth to shepherd the feature through the process to stabilization. Often
+  this is the case when the feature doesn't fit into the team's roadmap.
+  Postponed ideas may be revisited later.
+
+[rfcmerge]: https://github.com/rust-lang/rfcs/pull/2298#issuecomment-360582667
+
+When an RFC is merged, the PR is merged into the RFCs repo. A new _tracking
+issue_ is created in the [rust-lang/rust] repo to track progress on the feature
+and discuss unresolved questions, implementation progress and blockers, etc.
+Here is the tracking issue for our [`?` macro feature][tracking].
+
+[tracking]: https://github.com/rust-lang/rust/issues/48075
+
+<a name="impl"></a>
+
+## Implementation
+
+To make a change to the compiler, open a PR against the [rust-lang/rust] repo.
+
+[rust-lang/rust]: https://github.com/rust-lang/rust
+
+Depending on the feature/change/bug fix/improvement, implementation may be
+relatively straightforward or it may be a major undertaking. You can always ask
+for help or mentorship from more experienced compiler devs. Also, you don't
+have to be the one to implement your feature; but keep in mind that if you
+don't, it might be a while before someone else does.
+
+For the `?` macro feature, I needed to go understand the relevant parts of
+macro expansion in the compiler. Personally, I find that [improving the
+comments][comments] in the code is a helpful way of making sure I understand
+it, but you don't have to do that if you don't want to.
+
+[comments]: https://github.com/rust-lang/rust/pull/47732
+
+I then [implemented][impl1] the original feature, as described in the RFC. When
+a new feature is implemented, it goes behind a _feature gate_, which means that
+you have to use `#![feature(my_feature_name)]` to use the feature. The feature
+gate is removed when the feature is stabilized.
+
+**Most bug fixes and improvements** don't require a feature gate. You can just
+make your changes/improvements.
+
+When you open a PR on the [rust-lang/rust] repo, a bot will assign your PR to a
+reviewer. If there is a particular rust team member you are working with, you
+can request that reviewer by leaving a comment on the thread with `r?
+@reviewer-github-id` (e.g. `r? @eddyb`). If you don't know who to request,
+don't request anyone; the bot will assign someone automatically.
+
+The reviewer may request changes before they approve your PR. Feel free to ask
+questions or discuss things you don't understand or disagree with. However,
+recognize that the PR won't be merged unless someone on the rust team approves
+it.
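+
+As an aside before moving on to the merge queue: while the feature was still
+unstable, opting in looked roughly like the snippet below. The gate name comes
+from the (now removed) `macro_at_most_once_rep` unstable-book page touched
+elsewhere in this PR; the macro itself is an editorial example:
+
+```rust,ignore
+// Nightly-only, 2018 edition, before stabilization:
+#![feature(macro_at_most_once_rep)]
+
+macro_rules! log_one_or_two {
+    ($a:expr $(, $b:expr)?) => {
+        println!("{}", $a);
+        $( println!("{}", $b); )?
+    };
+}
+```
+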
+
+When your reviewer approves the PR, it will go into a queue for yet another bot
+called `@bors`. `@bors` manages the CI build/merge queue. When your PR reaches
+the head of the `@bors` queue, `@bors` will test out the merge by running all
+tests against your PR on Travis CI. This takes about 2 hours as of this
+writing. If all tests pass, the PR is merged and becomes part of the next
+nightly compiler!
+
+There are a couple of things that may happen for some PRs during the review
+process:
+
+- If the change is substantial enough, the reviewer may request an FCP on
+  the PR. This gives all members of the appropriate team a chance to review the
+  changes.
+- If the change may cause breakage, the reviewer may request a [crater] run.
+  This builds the compiler with your changes and then attempts to compile all
+  crates on crates.io with your modified compiler. This is a great smoke test
+  to check if you introduced a change to compiler behavior that affects a large
+  portion of the ecosystem.
+- If the diff of your PR is large or the reviewer is busy, your PR may have
+  some merge conflicts with other PRs that happen to get merged first. You
+  should fix these merge conflicts using the normal git procedures.
+
+[crater]: ./tests/intro.html#crater
+
+If you are not doing a new feature or something like that (e.g. if you are
+fixing a bug), then that's it! Thanks for your contribution :)
+
+## Refining your implementation
+
+As people get experience with your new feature on nightly, slight changes may
+be proposed and unresolved questions may become resolved. Updates/changes go
+through the same process as any other change, as described
+above (i.e. submit a PR, go through review, wait for `@bors`, etc).
+
+Some changes may be major enough to require an FCP and some review by rust team
+members.
+
+For the `?` macro feature, we went through a few different iterations after the
+original implementation: [1][impl2], [2][impl3], [3][impl4].
+
+Along the way, we decided that `?` should not take a separator, which was
+previously an unresolved question listed in the RFC. We also changed the
+disambiguation strategy: we decided to remove the ability to use `?` as a
+separator token for other repetition operators (e.g. `+` or `*`). However,
+since this was a breaking change, we decided to do it over an edition boundary.
+Thus, the new feature can be enabled only in edition 2018. These deviations
+from the original RFC required [another
+FCP](https://github.com/rust-lang/rust/issues/51934).
+
+## Stabilization
+
+Finally, after the feature had baked for a while on nightly, a language team
+member [moved to stabilize it][stabilizefcp].
+
+[stabilizefcp]: https://github.com/rust-lang/rust/issues/48075#issuecomment-433177613
+
+A _stabilization report_ needs to be written that includes:
+
+- a brief description of the behavior and any deviations from the RFC
+- which edition(s) are affected and how
+- links to a few tests to show the interesting aspects
+
+The stabilization report for our feature is [here][stabrep].
+
+[stabrep]: https://github.com/rust-lang/rust/issues/48075#issuecomment-433243048
+
+After this, [a PR is made][stab] to remove the feature gate, enabling the
+feature by default (on the 2018 edition). A note is added to the
+[Release notes][relnotes] about the feature.
+ +[stab]: https://github.com/rust-lang/rust/pull/56245 + +TODO: currently, we have a [forge article][feature-stab] about stabilization, but +we really ought to move that to the guide (in fact, we probably should have a whole +chapter about feature gates and stabilization). + +[feature-stab]: https://forge.rust-lang.org/stabilization-guide.html + +[relnotes]: https://github.com/rust-lang/rust/blob/master/RELEASES.md diff --git a/src/doc/rustc/src/contributing.md b/src/doc/rustc/src/contributing.md index fcb8e6b27d..3a1cafe8a6 100644 --- a/src/doc/rustc/src/contributing.md +++ b/src/doc/rustc/src/contributing.md @@ -1,6 +1,6 @@ # Contributing to rustc We'd love to have your help improving `rustc`! To that end, we've written [a -whole book](https://rust-lang-nursery.github.io/rustc-guide/) on its +whole book](https://rust-lang.github.io/rustc-guide/) on its internals, how it works, and how to get started working on it. To learn -more, you'll want to check that out. \ No newline at end of file +more, you'll want to check that out. diff --git a/src/doc/rustdoc/src/unstable-features.md b/src/doc/rustdoc/src/unstable-features.md index 071575b1fc..43cdab27e9 100644 --- a/src/doc/rustdoc/src/unstable-features.md +++ b/src/doc/rustdoc/src/unstable-features.md @@ -197,6 +197,22 @@ issue][issue-include]. [unstable-include]: ../unstable-book/language-features/external-doc.html [issue-include]: https://github.com/rust-lang/rust/issues/44732 +### Add aliases for an item in documentation search + +This feature allows you to add alias(es) to an item when using the `rustdoc` search through the +`doc(alias)` attribute. Example: + +```rust,no_run +#![feature(doc_alias)] + +#[doc(alias = "x")] +#[doc(alias = "big")] +pub struct BigX; +``` + +Then, when looking for it through the `rustdoc` search, if you enter "x" or +"big", search will show the `BigX` struct first. + ## Unstable command-line arguments These features are enabled by passing a command-line flag to Rustdoc, but the flags in question are @@ -374,18 +390,15 @@ This is an internal flag intended for the standard library and compiler that app allows `rustdoc` to be able to generate documentation for the compiler crates and the standard library, as an equivalent command-line argument is provided to `rustc` when building those crates. -### `doc_alias` feature +### `--index-page`: provide a top-level landing page for docs -This feature allows you to add alias(es) to an item when using the `rustdoc` search through the -`doc(alias)` attribute. Example: +This feature allows you to generate an index-page with a given markdown file. A good example of it +is the [rust documentation index](https://doc.rust-lang.org/index.html). -```rust,no_run -#![feature(doc_alias)] +With this, you'll have a page which you can custom as much as you want at the top of your crates. -#[doc(alias = "x")] -#[doc(alias = "big")] -pub struct BigX; -``` +Using `index-page` option enables `enable-index-page` option as well. -Then, when looking for it through the `rustdoc` search, if you enter "x" or -"big", search will show the `BigX` struct first. +### `--enable-index-page`: generate a default index page for docs + +This feature allows the generation of a default index-page which lists the generated crates. 
diff --git a/src/doc/unstable-book/src/language-features/macro-at-most-once-rep.md b/src/doc/unstable-book/src/language-features/macro-at-most-once-rep.md deleted file mode 100644 index 251fc72091..0000000000 --- a/src/doc/unstable-book/src/language-features/macro-at-most-once-rep.md +++ /dev/null @@ -1,22 +0,0 @@ -# `macro_at_most_once_rep` - -NOTE: This feature is only available in the 2018 Edition. - -The tracking issue for this feature is: #48075 - -With this feature gate enabled, one can use `?` as a Kleene operator meaning "0 -or 1 repetitions" in a macro definition. Previously only `+` and `*` were allowed. - -For example: - -```rust,ignore -#![feature(macro_at_most_once_rep)] - -macro_rules! foo { - (something $(,)?) // `?` indicates `,` is "optional"... - => {} -} -``` - ------------------------- - diff --git a/src/doc/unstable-book/src/language-features/macro-literal-matcher.md b/src/doc/unstable-book/src/language-features/macro-literal-matcher.md deleted file mode 100644 index 870158200d..0000000000 --- a/src/doc/unstable-book/src/language-features/macro-literal-matcher.md +++ /dev/null @@ -1,17 +0,0 @@ -# `macro_literal_matcher` - -The tracking issue for this feature is: [#35625] - -The RFC is: [rfc#1576]. - -With this feature gate enabled, the [list of designators] gains one more entry: - -* `literal`: a literal. Examples: 2, "string", 'c' - -A `literal` may be followed by anything, similarly to the `ident` specifier. - -[rfc#1576]: http://rust-lang.github.io/rfcs/1576-macros-literal-matcher.html -[#35625]: https://github.com/rust-lang/rust/issues/35625 -[list of designators]: ../reference/macros-by-example.html - ------------------------- diff --git a/src/doc/unstable-book/src/language-features/marker-trait-attr.md b/src/doc/unstable-book/src/language-features/marker-trait-attr.md index 9dd7b6fae9..dedc7d3015 100644 --- a/src/doc/unstable-book/src/language-features/marker-trait-attr.md +++ b/src/doc/unstable-book/src/language-features/marker-trait-attr.md @@ -17,15 +17,17 @@ when they'd need to do the same thing for every type anyway). ```rust #![feature(marker_trait_attr)] -use std::fmt::{Debug, Display}; +#[marker] trait CheapToClone: Clone {} -#[marker] trait MyMarker {} +impl CheapToClone for T {} -impl MyMarker for T {} -impl MyMarker for T {} +// These could potentally overlap with the blanket implementation above, +// so are only allowed because CheapToClone is a marker trait. +impl CheapToClone for (T, U) {} +impl CheapToClone for std::ops::Range {} -fn foo(t: T) -> T { - t +fn cheap_clone(t: T) -> T { + t.clone() } ``` diff --git a/src/doc/unstable-book/src/language-features/plugin.md b/src/doc/unstable-book/src/language-features/plugin.md index 74bdd4dc3b..03ea392c86 100644 --- a/src/doc/unstable-book/src/language-features/plugin.md +++ b/src/doc/unstable-book/src/language-features/plugin.md @@ -181,7 +181,6 @@ that warns about any item named `lintme`. 
```rust,ignore #![feature(plugin_registrar)] #![feature(box_syntax, rustc_private)] -#![feature(macro_at_most_once_rep)] extern crate syntax; diff --git a/src/doc/unstable-book/src/language-features/self-in-typedefs.md b/src/doc/unstable-book/src/language-features/self-in-typedefs.md deleted file mode 100644 index 2416e85c17..0000000000 --- a/src/doc/unstable-book/src/language-features/self-in-typedefs.md +++ /dev/null @@ -1,24 +0,0 @@ -# `self_in_typedefs` - -The tracking issue for this feature is: [#49303] - -[#49303]: https://github.com/rust-lang/rust/issues/49303 - ------------------------- - -The `self_in_typedefs` feature gate lets you use the special `Self` identifier -in `struct`, `enum`, and `union` type definitions. - -A simple example is: - -```rust -#![feature(self_in_typedefs)] - -enum List -where - Self: PartialOrd // can write `Self` instead of `List` -{ - Nil, - Cons(T, Box) // likewise here -} -``` diff --git a/src/doc/unstable-book/src/language-features/self-struct-ctor.md b/src/doc/unstable-book/src/language-features/self-struct-ctor.md deleted file mode 100644 index b4742c48a3..0000000000 --- a/src/doc/unstable-book/src/language-features/self-struct-ctor.md +++ /dev/null @@ -1,33 +0,0 @@ -# `self_struct_ctor` - -The tracking issue for this feature is: [#51994] -[#51994]: https://github.com/rust-lang/rust/issues/51994 - ------------------------- - -The `self_struct_ctor` feature gate lets you use the special `Self` -identifier as a constructor and a pattern. - -A simple example is: - -```rust -#![feature(self_struct_ctor)] - -struct ST(i32, i32); - -impl ST { - fn new() -> Self { - ST(0, 1) - } - - fn ctor() -> Self { - Self(1,2) // constructed by `Self`, it is the same as `ST(1, 2)` - } - - fn pattern(self) { - match self { - Self(x, y) => println!("{} {}", x, y), // used as a pattern - } - } -} -``` diff --git a/src/doc/unstable-book/src/language-features/trait-alias.md b/src/doc/unstable-book/src/language-features/trait-alias.md new file mode 100644 index 0000000000..4f2db04016 --- /dev/null +++ b/src/doc/unstable-book/src/language-features/trait-alias.md @@ -0,0 +1,34 @@ +# `trait_alias` + +The tracking issue for this feature is: [#41517] + +[#41417]: https://github.com/rust-lang/rust/issues/41517 + +------------------------ + +The `trait_alias` feature adds support for trait aliases. These allow aliases +to be created for one or more traits (currently just a single regular trait plus +any number of auto-traits), and used wherever traits would normally be used as +either bounds or trait objects. + +```rust +#![feature(trait_alias)] + +trait Foo = std::fmt::Debug + Send; +trait Bar = Foo + Sync; + +// Use trait alias as bound on type parameter. +fn foo(v: &T) { + println!("{:?}", v); +} + +pub fn main() { + foo(&1); + + // Use trait alias for trait objects. + let a: &Bar = &123; + println!("{:?}", a); + let b = Box::new(456) as Box; + println!("{:?}", b); +} +``` diff --git a/src/doc/unstable-book/src/language-features/unsized-locals.md b/src/doc/unstable-book/src/language-features/unsized-locals.md index 7a5fe5b7f2..1165ab93a1 100644 --- a/src/doc/unstable-book/src/language-features/unsized-locals.md +++ b/src/doc/unstable-book/src/language-features/unsized-locals.md @@ -80,8 +80,6 @@ fn main() { } ``` -However, the current implementation allows `MyTupleStruct(..)` to be unsized. This will be fixed in the future. - ## By-value trait objects With this feature, you can have by-value `self` arguments without `Self: Sized` bounds. 
@@ -101,9 +99,9 @@ fn main() { } ``` -And `Foo` will also be object-safe. However, this object-safety is not yet implemented. +And `Foo` will also be object-safe. -```rust,ignore +```rust #![feature(unsized_locals)] trait Foo { @@ -119,8 +117,6 @@ fn main () { } ``` -Unfortunately, this is not implemented yet. - One of the objectives of this feature is to allow `Box`, instead of `Box` in the future. See [#28796] for details. [#28796]: https://github.com/rust-lang/rust/issues/28796 diff --git a/src/etc/debugger_pretty_printers_common.py b/src/etc/debugger_pretty_printers_common.py index 1797f6708a..b99e401929 100644 --- a/src/etc/debugger_pretty_printers_common.py +++ b/src/etc/debugger_pretty_printers_common.py @@ -375,32 +375,6 @@ def extract_tail_head_ptr_and_cap_from_std_vecdeque(vec_val): assert data_ptr.type.get_dwarf_type_kind() == DWARF_TYPE_CODE_PTR return (tail, head, data_ptr, capacity) - -def extract_length_and_ptr_from_std_btreeset(vec_val): - assert vec_val.type.get_type_kind() == TYPE_KIND_STD_BTREESET - map = vec_val.get_child_at_index(0) - root = map.get_child_at_index(0) - length = map.get_child_at_index(1).as_integer() - node = root.get_child_at_index(0) - ptr = node.get_child_at_index(0) - unique_ptr_val = ptr.get_child_at_index(0) - data_ptr = unique_ptr_val.get_child_at_index(0) - assert data_ptr.type.get_dwarf_type_kind() == DWARF_TYPE_CODE_PTR - return (length, data_ptr) - - -def extract_length_and_ptr_from_std_btreemap(vec_val): - assert vec_val.type.get_type_kind() == TYPE_KIND_STD_BTREEMAP - root = vec_val.get_child_at_index(0) - length = vec_val.get_child_at_index(1).as_integer() - node = root.get_child_at_index(0) - ptr = node.get_child_at_index(0) - unique_ptr_val = ptr.get_child_at_index(0) - data_ptr = unique_ptr_val.get_child_at_index(0) - assert data_ptr.type.get_dwarf_type_kind() == DWARF_TYPE_CODE_PTR - return (length, data_ptr) - - def extract_length_and_ptr_from_slice(slice_val): assert (slice_val.type.get_type_kind() == TYPE_KIND_SLICE or slice_val.type.get_type_kind() == TYPE_KIND_STR_SLICE) diff --git a/src/etc/gdb_rust_pretty_printing.py b/src/etc/gdb_rust_pretty_printing.py index b1252f386d..a376c8593f 100755 --- a/src/etc/gdb_rust_pretty_printing.py +++ b/src/etc/gdb_rust_pretty_printing.py @@ -9,7 +9,6 @@ # except according to those terms. 
import gdb -import re import sys import debugger_pretty_printers_common as rustpp @@ -19,6 +18,8 @@ import debugger_pretty_printers_common as rustpp if sys.version_info[0] >= 3: xrange = range +rust_enabled = 'set language rust' in gdb.execute('complete set language ru', to_string = True) + #=============================================================================== # GDB Pretty Printing Module for Rust #=============================================================================== @@ -100,27 +101,9 @@ def rust_pretty_printer_lookup_function(gdb_val): val = GdbValue(gdb_val) type_kind = val.type.get_type_kind() - if type_kind == rustpp.TYPE_KIND_EMPTY: - return RustEmptyPrinter(val) - - if type_kind == rustpp.TYPE_KIND_REGULAR_STRUCT: - return RustStructPrinter(val, - omit_first_field = False, - omit_type_name = False, - is_tuple_like = False) - - if type_kind == rustpp.TYPE_KIND_STRUCT_VARIANT: - return RustStructPrinter(val, - omit_first_field = True, - omit_type_name = False, - is_tuple_like = False) - if type_kind == rustpp.TYPE_KIND_SLICE: return RustSlicePrinter(val) - if type_kind == rustpp.TYPE_KIND_STR_SLICE: - return RustStringSlicePrinter(val) - if type_kind == rustpp.TYPE_KIND_STD_VEC: return RustStdVecPrinter(val) @@ -139,6 +122,29 @@ def rust_pretty_printer_lookup_function(gdb_val): if type_kind == rustpp.TYPE_KIND_OS_STRING: return RustOsStringPrinter(val) + # Checks after this point should only be for "compiler" types -- + # things that gdb's Rust language support knows about. + if rust_enabled: + return None + + if type_kind == rustpp.TYPE_KIND_EMPTY: + return RustEmptyPrinter(val) + + if type_kind == rustpp.TYPE_KIND_REGULAR_STRUCT: + return RustStructPrinter(val, + omit_first_field = False, + omit_type_name = False, + is_tuple_like = False) + + if type_kind == rustpp.TYPE_KIND_STRUCT_VARIANT: + return RustStructPrinter(val, + omit_first_field = True, + omit_type_name = False, + is_tuple_like = False) + + if type_kind == rustpp.TYPE_KIND_STR_SLICE: + return RustStringSlicePrinter(val) + if type_kind == rustpp.TYPE_KIND_TUPLE: return RustStructPrinter(val, omit_first_field = False, @@ -294,17 +300,51 @@ class RustStdVecDequePrinter(object): def to_string(self): (tail, head, data_ptr, cap) = \ rustpp.extract_tail_head_ptr_and_cap_from_std_vecdeque(self.__val) + if head >= tail: + size = head - tail + else: + size = cap + head - tail return (self.__val.type.get_unqualified_type_name() + - ("(len: %i, cap: %i)" % (head - tail, cap))) + ("(len: %i, cap: %i)" % (size, cap))) def children(self): (tail, head, data_ptr, cap) = \ rustpp.extract_tail_head_ptr_and_cap_from_std_vecdeque(self.__val) gdb_ptr = data_ptr.get_wrapped_value() - for index in xrange(tail, head): - yield (str(index), (gdb_ptr + index).dereference()) + if head >= tail: + size = head - tail + else: + size = cap + head - tail + for index in xrange(0, size): + yield (str(index), (gdb_ptr + ((tail + index) % cap)).dereference()) +# Yield each key (and optionally value) from a BoxedNode. +def children_of_node(boxed_node, height, want_values): + ptr = boxed_node['ptr']['pointer'] + # This is written oddly because we don't want to rely on the field name being `__0`. 
+ node_ptr = ptr[ptr.type.fields()[0]] + if height > 0: + type_name = str(node_ptr.type.target()).replace('LeafNode', 'InternalNode') + node_type = gdb.lookup_type(type_name) + node_ptr = node_ptr.cast(node_type.pointer()) + leaf = node_ptr['data'] + else: + leaf = node_ptr.dereference() + keys = leaf['keys']['value']['value'] + if want_values: + values = leaf['vals']['value']['value'] + length = int(leaf['len']) + for i in xrange(0, length + 1): + if height > 0: + for child in children_of_node(node_ptr['edges'][i], height - 1, want_values): + yield child + if i < length: + if want_values: + yield (keys[i], values[i]) + else: + yield keys[i] + class RustStdBTreeSetPrinter(object): def __init__(self, val): self.__val = val @@ -314,21 +354,16 @@ class RustStdBTreeSetPrinter(object): return "array" def to_string(self): - (length, data_ptr) = \ - rustpp.extract_length_and_ptr_from_std_btreeset(self.__val) return (self.__val.type.get_unqualified_type_name() + - ("(len: %i)" % length)) + ("(len: %i)" % self.__val.get_wrapped_value()['map']['length'])) def children(self): - (length, data_ptr) = \ - rustpp.extract_length_and_ptr_from_std_btreeset(self.__val) - leaf_node = GdbValue(data_ptr.get_wrapped_value().dereference()) - maybe_uninit_keys = leaf_node.get_child_at_index(3) - manually_drop_keys = maybe_uninit_keys.get_child_at_index(1) - keys = manually_drop_keys.get_child_at_index(0) - gdb_ptr = keys.get_wrapped_value() - for index in xrange(length): - yield (str(index), gdb_ptr[index]) + root = self.__val.get_wrapped_value()['map']['root'] + node_ptr = root['node'] + i = 0 + for child in children_of_node(node_ptr, root['height'], False): + yield (str(i), child) + i = i + 1 class RustStdBTreeMapPrinter(object): @@ -340,26 +375,17 @@ class RustStdBTreeMapPrinter(object): return "map" def to_string(self): - (length, data_ptr) = \ - rustpp.extract_length_and_ptr_from_std_btreemap(self.__val) return (self.__val.type.get_unqualified_type_name() + - ("(len: %i)" % length)) + ("(len: %i)" % self.__val.get_wrapped_value()['length'])) def children(self): - (length, data_ptr) = \ - rustpp.extract_length_and_ptr_from_std_btreemap(self.__val) - leaf_node = GdbValue(data_ptr.get_wrapped_value().dereference()) - maybe_uninit_keys = leaf_node.get_child_at_index(3) - manually_drop_keys = maybe_uninit_keys.get_child_at_index(1) - keys = manually_drop_keys.get_child_at_index(0) - keys_ptr = keys.get_wrapped_value() - maybe_uninit_vals = leaf_node.get_child_at_index(4) - manually_drop_vals = maybe_uninit_vals.get_child_at_index(1) - vals = manually_drop_vals.get_child_at_index(0) - vals_ptr = vals.get_wrapped_value() - for index in xrange(length): - yield (str(index), keys_ptr[index]) - yield (str(index), vals_ptr[index]) + root = self.__val.get_wrapped_value()['root'] + node_ptr = root['node'] + i = 0 + for child in children_of_node(node_ptr, root['height'], True): + yield (str(i), child[0]) + yield (str(i), child[1]) + i = i + 1 class RustStdStringPrinter(object): diff --git a/src/etc/generate-deriving-span-tests.py b/src/etc/generate-deriving-span-tests.py index 2e9169ce5b..31a438958e 100755 --- a/src/etc/generate-deriving-span-tests.py +++ b/src/etc/generate-deriving-span-tests.py @@ -18,10 +18,10 @@ derives have spans that point to the fields, rather than the sample usage: src/etc/generate-deriving-span-tests.py """ -import sys, os, datetime, stat, re +import os, datetime, stat, re TEST_DIR = os.path.abspath( - os.path.join(os.path.dirname(__file__), '../test/compile-fail')) + 
os.path.join(os.path.dirname(__file__), '../test/ui/derives/')) YEAR = datetime.datetime.now().year diff --git a/src/etc/lldb_batchmode.py b/src/etc/lldb_batchmode.py index 24a0ce0ac3..b0220c84ef 100644 --- a/src/etc/lldb_batchmode.py +++ b/src/etc/lldb_batchmode.py @@ -12,7 +12,7 @@ # containing LLDB commands (one command per line), this script will execute the commands one after # the other. # LLDB also has the -s and -S commandline options which also execute a list of commands from a text -# file. However, this command are execute `immediately`: a the command of a `run` or `continue` +# file. However, this command are execute `immediately`: the command of a `run` or `continue` # command will be executed immediately after the `run` or `continue`, without waiting for the next # breakpoint to be hit. This a command sequence like the following will not yield reliable results: # diff --git a/src/liballoc/alloc.rs b/src/liballoc/alloc.rs index 3bd0c243b3..1a8a081e16 100644 --- a/src/liballoc/alloc.rs +++ b/src/liballoc/alloc.rs @@ -21,6 +21,10 @@ use core::usize; pub use core::alloc::*; extern "Rust" { + // These are the magic symbols to call the global allocator. rustc generates + // them from the `#[global_allocator]` attribute if there is one, or uses the + // default implementations in libstd (`__rdl_alloc` etc in `src/libstd/alloc.rs`) + // otherwise. #[allocator] #[rustc_allocator_nounwind] fn __rust_alloc(size: usize, align: usize) -> *mut u8; diff --git a/src/liballoc/boxed.rs b/src/liballoc/boxed.rs index f989e70191..c3a84bf778 100644 --- a/src/liballoc/boxed.rs +++ b/src/liballoc/boxed.rs @@ -73,14 +73,15 @@ use core::convert::From; use core::fmt; use core::future::Future; use core::hash::{Hash, Hasher}; -use core::iter::FusedIterator; +use core::iter::{Iterator, FromIterator, FusedIterator}; use core::marker::{Unpin, Unsize}; use core::mem; use core::pin::Pin; -use core::ops::{CoerceUnsized, Deref, DerefMut, Generator, GeneratorState}; +use core::ops::{CoerceUnsized, DispatchFromDyn, Deref, DerefMut, Generator, GeneratorState}; use core::ptr::{self, NonNull, Unique}; use core::task::{LocalWaker, Poll}; +use vec::Vec; use raw_vec::RawVec; use str::from_boxed_utf8_unchecked; @@ -488,7 +489,7 @@ impl Box { /// ``` /// use std::any::Any; /// - /// fn print_if_string(value: Box) { + /// fn print_if_string(value: Box) { /// if let Ok(string) = value.downcast::() { /// println!("String ({}): {}", string.len(), string); /// } @@ -522,7 +523,7 @@ impl Box { /// ``` /// use std::any::Any; /// - /// fn print_if_string(value: Box) { + /// fn print_if_string(value: Box) { /// if let Ok(string) = value.downcast::() { /// println!("String ({}): {}", string.len(), string); /// } @@ -617,10 +618,10 @@ impl FusedIterator for Box {} /// `FnBox` is a version of the `FnOnce` intended for use with boxed /// closure objects. The idea is that where one would normally store a -/// `Box` in a data structure, you should use -/// `Box`. The two traits behave essentially the same, except +/// `Box` in a data structure, you should use +/// `Box`. The two traits behave essentially the same, except /// that a `FnBox` closure can only be called if it is boxed. (Note -/// that `FnBox` may be deprecated in the future if `Box` +/// that `FnBox` may be deprecated in the future if `Box` /// closures become directly usable.) 
/// /// # Examples @@ -628,7 +629,7 @@ impl FusedIterator for Box {} /// Here is a snippet of code which creates a hashmap full of boxed /// once closures and then removes them one by one, calling each /// closure as it is removed. Note that the type of the closures -/// stored in the map is `Box i32>` and not `Box i32>` and not `Box i32>`. /// /// ``` @@ -637,8 +638,8 @@ impl FusedIterator for Box {} /// use std::boxed::FnBox; /// use std::collections::HashMap; /// -/// fn make_map() -> HashMap i32>> { -/// let mut map: HashMap i32>> = HashMap::new(); +/// fn make_map() -> HashMap i32>> { +/// let mut map: HashMap i32>> = HashMap::new(); /// map.insert(1, Box::new(|| 22)); /// map.insert(2, Box::new(|| 44)); /// map @@ -696,6 +697,16 @@ impl<'a, A, R> FnOnce for Box + Send + 'a> { #[unstable(feature = "coerce_unsized", issue = "27732")] impl, U: ?Sized> CoerceUnsized> for Box {} +#[unstable(feature = "dispatch_from_dyn", issue = "0")] +impl, U: ?Sized> DispatchFromDyn> for Box {} + +#[stable(feature = "boxed_slice_from_iter", since = "1.32.0")] +impl FromIterator for Box<[A]> { + fn from_iter>(iter: T) -> Self { + iter.into_iter().collect::>().into_boxed_slice() + } +} + #[stable(feature = "box_slice_clone", since = "1.3.0")] impl Clone for Box<[T]> { fn clone(&self) -> Self { diff --git a/src/liballoc/boxed_test.rs b/src/liballoc/boxed_test.rs index 55995742a4..f340ea01c5 100644 --- a/src/liballoc/boxed_test.rs +++ b/src/liballoc/boxed_test.rs @@ -140,3 +140,11 @@ fn str_slice() { let boxed: Box = Box::from(s); assert_eq!(&*boxed, s) } + +#[test] +fn boxed_slice_from_iter() { + let iter = 0..100; + let boxed: Box<[u32]> = iter.collect(); + assert_eq!(boxed.len(), 100); + assert_eq!(boxed[7], 7); +} diff --git a/src/liballoc/collections/binary_heap.rs b/src/liballoc/collections/binary_heap.rs index fcadcb544c..8c36962a29 100644 --- a/src/liballoc/collections/binary_heap.rs +++ b/src/liballoc/collections/binary_heap.rs @@ -529,7 +529,7 @@ impl BinaryHeap { /// assert!(heap.capacity() >= 10); /// ``` #[inline] - #[unstable(feature = "shrink_to", reason = "new API", issue="0")] + #[unstable(feature = "shrink_to", reason = "new API", issue="56431")] pub fn shrink_to(&mut self, min_capacity: usize) { self.data.shrink_to(min_capacity) } diff --git a/src/liballoc/collections/btree/map.rs b/src/liballoc/collections/btree/map.rs index 24c8fd3a96..49e488702b 100644 --- a/src/liballoc/collections/btree/map.rs +++ b/src/liballoc/collections/btree/map.rs @@ -853,9 +853,10 @@ impl BTreeMap { /// ``` /// use std::collections::BTreeMap; /// - /// let mut map: BTreeMap<&str, i32> = ["Alice", "Bob", "Carol", "Cheryl"].iter() - /// .map(|&s| (s, 0)) - /// .collect(); + /// let mut map: BTreeMap<&str, i32> = ["Alice", "Bob", "Carol", "Cheryl"] + /// .iter() + /// .map(|&s| (s, 0)) + /// .collect(); /// for (_, balance) in map.range_mut("B".."Cheryl") { /// *balance += 100; /// } diff --git a/src/liballoc/collections/btree/node.rs b/src/liballoc/collections/btree/node.rs index deca9591fb..215689dfc4 100644 --- a/src/liballoc/collections/btree/node.rs +++ b/src/liballoc/collections/btree/node.rs @@ -69,7 +69,7 @@ struct LeafNode { /// This node's index into the parent node's `edges` array. /// `*node.parent.edges[node.parent_idx]` should be the same thing as `node`. - /// This is only guaranteed to be initialized when `parent` is nonnull. + /// This is only guaranteed to be initialized when `parent` is non-null. parent_idx: MaybeUninit, /// The number of keys and values this node stores. 
@@ -602,7 +602,7 @@ impl<'a, K: 'a, V: 'a, Type> NodeRef, K, V, Type> { } else { unsafe { slice::from_raw_parts_mut( - self.as_leaf_mut().keys.get_mut() as *mut [K] as *mut K, + self.as_leaf_mut().keys.as_mut_ptr() as *mut K, self.len() ) } @@ -613,7 +613,7 @@ impl<'a, K: 'a, V: 'a, Type> NodeRef, K, V, Type> { debug_assert!(!self.is_shared_root()); unsafe { slice::from_raw_parts_mut( - self.as_leaf_mut().vals.get_mut() as *mut [V] as *mut V, + self.as_leaf_mut().vals.as_mut_ptr() as *mut V, self.len() ) } diff --git a/src/liballoc/collections/vec_deque.rs b/src/liballoc/collections/vec_deque.rs index 571f35a203..c8ee40f3d2 100644 --- a/src/liballoc/collections/vec_deque.rs +++ b/src/liballoc/collections/vec_deque.rs @@ -19,7 +19,7 @@ use core::cmp::Ordering; use core::fmt; -use core::iter::{repeat, FromIterator, FusedIterator}; +use core::iter::{repeat_with, FromIterator, FusedIterator}; use core::mem; use core::ops::Bound::{Excluded, Included, Unbounded}; use core::ops::{Index, IndexMut, RangeBounds}; @@ -36,6 +36,8 @@ use vec::Vec; const INITIAL_CAPACITY: usize = 7; // 2^3 - 1 const MINIMUM_CAPACITY: usize = 1; // 2 - 1 +#[cfg(target_pointer_width = "16")] +const MAXIMUM_ZST_CAPACITY: usize = 1 << (16 - 1); // Largest possible power of two #[cfg(target_pointer_width = "32")] const MAXIMUM_ZST_CAPACITY: usize = 1 << (32 - 1); // Largest possible power of two #[cfg(target_pointer_width = "64")] @@ -699,7 +701,7 @@ impl VecDeque { /// buf.shrink_to(0); /// assert!(buf.capacity() >= 4); /// ``` - #[unstable(feature = "shrink_to", reason = "new API", issue="0")] + #[unstable(feature = "shrink_to", reason = "new API", issue="56431")] pub fn shrink_to(&mut self, min_capacity: usize) { assert!(self.capacity() >= min_capacity, "Tried to shrink to a larger capacity"); @@ -1884,6 +1886,44 @@ impl VecDeque { debug_assert!(!self.is_full()); } } + + /// Modifies the `VecDeque` in-place so that `len()` is equal to `new_len`, + /// either by removing excess elements from the back or by appending + /// elements generated by calling `generator` to the back. 
+ /// + /// # Examples + /// + /// ``` + /// #![feature(vec_resize_with)] + /// + /// use std::collections::VecDeque; + /// + /// let mut buf = VecDeque::new(); + /// buf.push_back(5); + /// buf.push_back(10); + /// buf.push_back(15); + /// assert_eq!(buf, [5, 10, 15]); + /// + /// buf.resize_with(5, Default::default); + /// assert_eq!(buf, [5, 10, 15, 0, 0]); + /// + /// buf.resize_with(2, || unreachable!()); + /// assert_eq!(buf, [5, 10]); + /// + /// let mut state = 100; + /// buf.resize_with(5, || { state += 1; state }); + /// assert_eq!(buf, [5, 10, 101, 102, 103]); + /// ``` + #[unstable(feature = "vec_resize_with", issue = "41758")] + pub fn resize_with(&mut self, new_len: usize, generator: impl FnMut()->T) { + let len = self.len(); + + if new_len > len { + self.extend(repeat_with(generator).take(new_len - len)) + } else { + self.truncate(new_len); + } + } } impl VecDeque { @@ -1910,13 +1950,7 @@ impl VecDeque { /// ``` #[stable(feature = "deque_extras", since = "1.16.0")] pub fn resize(&mut self, new_len: usize, value: T) { - let len = self.len(); - - if new_len > len { - self.extend(repeat(value).take(new_len - len)) - } else { - self.truncate(new_len); - } + self.resize_with(new_len, || value.clone()); } } diff --git a/src/liballoc/lib.rs b/src/liballoc/lib.rs index 84ca7c4fec..abacc62c85 100644 --- a/src/liballoc/lib.rs +++ b/src/liballoc/lib.rs @@ -86,7 +86,7 @@ #![feature(box_syntax)] #![feature(cfg_target_has_atomic)] #![feature(coerce_unsized)] -#![cfg_attr(stage0, feature(min_const_fn))] +#![feature(dispatch_from_dyn)] #![feature(core_intrinsics)] #![feature(custom_attribute)] #![feature(dropck_eyepatch)] @@ -119,6 +119,7 @@ #![feature(const_vec_new)] #![feature(slice_partition_dedup)] #![feature(maybe_uninit)] +#![feature(alloc_layout_extra)] // Allow testing this library diff --git a/src/liballoc/raw_vec.rs b/src/liballoc/raw_vec.rs index 837770feec..e87bf78561 100644 --- a/src/liballoc/raw_vec.rs +++ b/src/liballoc/raw_vec.rs @@ -44,7 +44,7 @@ use boxed::Box; /// This enables you to use capacity growing logic catch the overflows in your length /// that might occur with zero-sized types. /// -/// However this means that you need to be careful when roundtripping this type +/// However this means that you need to be careful when round-tripping this type /// with a `Box<[T]>`: `cap()` won't yield the len. However `with_capacity`, /// `shrink_to_fit`, and `from_box` will actually set RawVec's private capacity /// field. This allows zero-sized types to not be special-cased by consumers of diff --git a/src/liballoc/rc.rs b/src/liballoc/rc.rs index 39a261d3d5..3ca6de191d 100644 --- a/src/liballoc/rc.rs +++ b/src/liballoc/rc.rs @@ -254,7 +254,7 @@ use core::marker; use core::marker::{Unpin, Unsize, PhantomData}; use core::mem::{self, align_of_val, forget, size_of_val}; use core::ops::Deref; -use core::ops::CoerceUnsized; +use core::ops::{CoerceUnsized, DispatchFromDyn}; use core::pin::Pin; use core::ptr::{self, NonNull}; use core::convert::From; @@ -281,7 +281,7 @@ struct RcBox { /// type `T`. /// /// [get_mut]: #method.get_mut -#[cfg_attr(all(not(stage0), not(test)), lang = "rc")] +#[cfg_attr(not(test), lang = "rc")] #[stable(feature = "rust1", since = "1.0.0")] pub struct Rc { ptr: NonNull>, @@ -296,6 +296,9 @@ impl !marker::Sync for Rc {} #[unstable(feature = "coerce_unsized", issue = "27732")] impl, U: ?Sized> CoerceUnsized> for Rc {} +#[unstable(feature = "dispatch_from_dyn", issue = "0")] +impl, U: ?Sized> DispatchFromDyn> for Rc {} + impl Rc { /// Constructs a new `Rc`. 
/// @@ -630,7 +633,7 @@ impl Rc { impl Rc { #[inline] #[stable(feature = "rc_downcast", since = "1.29.0")] - /// Attempt to downcast the `Rc` to a concrete type. + /// Attempt to downcast the `Rc` to a concrete type. /// /// # Examples /// @@ -638,7 +641,7 @@ impl Rc { /// use std::any::Any; /// use std::rc::Rc; /// - /// fn print_if_string(value: Rc) { + /// fn print_if_string(value: Rc) { /// if let Ok(string) = value.downcast::() { /// println!("String ({}): {}", string.len(), string); /// } @@ -664,16 +667,20 @@ impl Rc { impl Rc { // Allocates an `RcBox` with sufficient space for an unsized value unsafe fn allocate_for_ptr(ptr: *const T) -> *mut RcBox { - // Create a fake RcBox to find allocation size and alignment - let fake_ptr = ptr as *mut RcBox; - - let layout = Layout::for_value(&*fake_ptr); + // Calculate layout using the given value. + // Previously, layout was calculated on the expression + // `&*(ptr as *const RcBox)`, but this created a misaligned + // reference (see #54908). + let layout = Layout::new::>() + .extend(Layout::for_value(&*ptr)).unwrap().0 + .pad_to_align().unwrap(); let mem = Global.alloc(layout) .unwrap_or_else(|_| handle_alloc_error(layout)); - // Initialize the real RcBox + // Initialize the RcBox let inner = set_data_ptr(ptr as *mut T, mem.as_ptr() as *mut u8) as *mut RcBox; + debug_assert_eq!(Layout::for_value(&*inner), layout); ptr::write(&mut (*inner).strong, Cell::new(1)); ptr::write(&mut (*inner).weak, Cell::new(1)); @@ -1175,6 +1182,9 @@ impl !marker::Sync for Weak {} #[unstable(feature = "coerce_unsized", issue = "27732")] impl, U: ?Sized> CoerceUnsized> for Weak {} +#[unstable(feature = "dispatch_from_dyn", issue = "0")] +impl, U: ?Sized> DispatchFromDyn> for Weak {} + impl Weak { /// Constructs a new `Weak`, without allocating any memory. /// Calling [`upgrade`][Weak::upgrade] on the return value always gives [`None`]. diff --git a/src/liballoc/slice.rs b/src/liballoc/slice.rs index 1eaff7410e..22da9dd6e9 100644 --- a/src/liballoc/slice.rs +++ b/src/liballoc/slice.rs @@ -213,6 +213,22 @@ impl [T] { /// /// This sort is stable (i.e. does not reorder equal elements) and `O(n log n)` worst-case. /// + /// The comparator function must define a total ordering for the elements in the slice. If + /// the ordering is not total, the order of the elements is unspecified. An order is a + /// total order if it is (for all a, b and c): + /// + /// * total and antisymmetric: exactly one of a < b, a == b or a > b is true; and + /// * transitive, a < b and b < c implies a < c. The same must hold for both == and >. + /// + /// For example, while [`f64`] doesn't implement [`Ord`] because `NaN != NaN`, we can use + /// `partial_cmp` as our sort function when we know the slice doesn't contain a `NaN`. + /// + /// ``` + /// let mut floats = [5f64, 4.0, 1.0, 3.0, 2.0]; + /// floats.sort_by(|a, b| a.partial_cmp(b).unwrap()); + /// assert_eq!(floats, [1.0, 2.0, 3.0, 4.0, 5.0]); + /// ``` + /// /// When applicable, unstable sorting is preferred because it is generally faster than stable /// sorting and it doesn't allocate auxiliary memory. /// See [`sort_unstable_by`](#method.sort_unstable_by). diff --git a/src/liballoc/string.rs b/src/liballoc/string.rs index ab3f8fc270..662f8ae614 100644 --- a/src/liballoc/string.rs +++ b/src/liballoc/string.rs @@ -413,7 +413,7 @@ impl String { /// /// // These are all done without reallocating... 
/// let cap = s.capacity(); - /// for i in 0..10 { + /// for _ in 0..10 { /// s.push('a'); /// } /// @@ -502,7 +502,7 @@ impl String { #[stable(feature = "rust1", since = "1.0.0")] pub fn from_utf8(vec: Vec) -> Result { match str::from_utf8(&vec) { - Ok(..) => Ok(String { vec: vec }), + Ok(..) => Ok(String { vec }), Err(e) => { Err(FromUtf8Error { bytes: vec, @@ -618,7 +618,17 @@ impl String { /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn from_utf16(v: &[u16]) -> Result { - decode_utf16(v.iter().cloned()).collect::>().map_err(|_| FromUtf16Error(())) + // This isn't done via collect::>() for performance reasons. + // FIXME: the function can be simplified again when #48994 is closed. + let mut ret = String::with_capacity(v.len()); + for c in decode_utf16(v.iter().cloned()) { + if let Ok(c) = c { + ret.push(c); + } else { + return Err(FromUtf16Error(())); + } + } + Ok(ret) } /// Decode a UTF-16 encoded slice `v` into a `String`, replacing @@ -1040,7 +1050,7 @@ impl String { /// assert!(s.capacity() >= 3); /// ``` #[inline] - #[unstable(feature = "shrink_to", reason = "new API", issue="0")] + #[unstable(feature = "shrink_to", reason = "new API", issue="56431")] pub fn shrink_to(&mut self, min_capacity: usize) { self.vec.shrink_to(min_capacity) } @@ -2206,6 +2216,20 @@ impl<'a> From<&'a str> for String { #[cfg(not(test))] #[stable(feature = "string_from_box", since = "1.18.0")] impl From> for String { + /// Converts the given boxed `str` slice to a `String`. + /// It is notable that the `str` slice is owned. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let s1: String = String::from("hello world"); + /// let s2: Box = s1.into_boxed_str(); + /// let s3: String = String::from(s2); + /// + /// assert_eq!("hello world", s3) + /// ``` fn from(s: Box) -> String { s.into_string() } @@ -2213,6 +2237,19 @@ impl From> for String { #[stable(feature = "box_from_str", since = "1.20.0")] impl From for Box { + /// Converts the given `String` to a boxed `str` slice that is owned. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let s1: String = String::from("hello world"); + /// let s2: Box = Box::from(s1); + /// let s3: String = String::from(s2); + /// + /// assert_eq!("hello world", s3) + /// ``` fn from(s: String) -> Box { s.into_boxed_str() } @@ -2272,6 +2309,20 @@ impl<'a> FromIterator for Cow<'a, str> { #[stable(feature = "from_string_for_vec_u8", since = "1.14.0")] impl From for Vec { + /// Converts the given `String` to a vector `Vec` that holds values of type `u8`. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let s1 = String::from("hello world"); + /// let v1 = Vec::from(s1); + /// + /// for b in v1 { + /// println!("{}", b); + /// } + /// ``` fn from(string: String) -> Vec { string.into_bytes() } diff --git a/src/liballoc/sync.rs b/src/liballoc/sync.rs index db3049b8fa..4f4031e3c4 100644 --- a/src/liballoc/sync.rs +++ b/src/liballoc/sync.rs @@ -25,7 +25,7 @@ use core::cmp::Ordering; use core::intrinsics::abort; use core::mem::{self, align_of_val, size_of_val}; use core::ops::Deref; -use core::ops::CoerceUnsized; +use core::ops::{CoerceUnsized, DispatchFromDyn}; use core::pin::Pin; use core::ptr::{self, NonNull}; use core::marker::{Unpin, Unsize, PhantomData}; @@ -198,7 +198,7 @@ const MAX_REFCOUNT: usize = (isize::MAX) as usize; /// counting in general. 
/// /// [rc_examples]: ../../std/rc/index.html#examples -#[cfg_attr(all(not(stage0), not(test)), lang = "arc")] +#[cfg_attr(not(test), lang = "arc")] #[stable(feature = "rust1", since = "1.0.0")] pub struct Arc { ptr: NonNull>, @@ -213,6 +213,9 @@ unsafe impl Sync for Arc {} #[unstable(feature = "coerce_unsized", issue = "27732")] impl, U: ?Sized> CoerceUnsized> for Arc {} +#[unstable(feature = "dispatch_from_dyn", issue = "0")] +impl, U: ?Sized> DispatchFromDyn> for Arc {} + /// `Weak` is a version of [`Arc`] that holds a non-owning reference to the /// managed value. The value is accessed by calling [`upgrade`] on the `Weak` /// pointer, which returns an [`Option`]`<`[`Arc`]`>`. @@ -253,6 +256,8 @@ unsafe impl Sync for Weak {} #[unstable(feature = "coerce_unsized", issue = "27732")] impl, U: ?Sized> CoerceUnsized> for Weak {} +#[unstable(feature = "dispatch_from_dyn", issue = "0")] +impl, U: ?Sized> DispatchFromDyn> for Weak {} #[stable(feature = "arc_weak", since = "1.4.0")] impl fmt::Debug for Weak { @@ -565,16 +570,20 @@ impl Arc { impl Arc { // Allocates an `ArcInner` with sufficient space for an unsized value unsafe fn allocate_for_ptr(ptr: *const T) -> *mut ArcInner { - // Create a fake ArcInner to find allocation size and alignment - let fake_ptr = ptr as *mut ArcInner; - - let layout = Layout::for_value(&*fake_ptr); + // Calculate layout using the given value. + // Previously, layout was calculated on the expression + // `&*(ptr as *const ArcInner)`, but this created a misaligned + // reference (see #54908). + let layout = Layout::new::>() + .extend(Layout::for_value(&*ptr)).unwrap().0 + .pad_to_align().unwrap(); let mem = Global.alloc(layout) .unwrap_or_else(|_| handle_alloc_error(layout)); - // Initialize the real ArcInner + // Initialize the ArcInner let inner = set_data_ptr(ptr as *mut T, mem.as_ptr() as *mut u8) as *mut ArcInner; + debug_assert_eq!(Layout::for_value(&*inner), layout); ptr::write(&mut (*inner).strong, atomic::AtomicUsize::new(1)); ptr::write(&mut (*inner).weak, atomic::AtomicUsize::new(1)); diff --git a/src/liballoc/tests/heap.rs b/src/liballoc/tests/heap.rs index 6fa88ce969..bf256b23f9 100644 --- a/src/liballoc/tests/heap.rs +++ b/src/liballoc/tests/heap.rs @@ -8,13 +8,9 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use alloc_system::System; -use std::alloc::{Global, Alloc, Layout}; +use std::alloc::{Global, Alloc, Layout, System}; /// https://github.com/rust-lang/rust/issues/45955 -/// -/// Note that `#[global_allocator]` is not used, -/// so `liballoc_jemalloc` is linked (on some platforms). #[test] fn alloc_system_overaligned_request() { check_overalign_requests(System) diff --git a/src/liballoc/tests/lib.rs b/src/liballoc/tests/lib.rs index 6d1cfb1085..e514a8a69c 100644 --- a/src/liballoc/tests/lib.rs +++ b/src/liballoc/tests/lib.rs @@ -9,9 +9,7 @@ // except according to those terms. 
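Both `Rc::allocate_for_ptr` and its `Arc` counterpart above now compute the allocation layout by extending a header layout with the value's layout, rather than materialising a dangling `RcBox`/`ArcInner` reference that could be misaligned (#54908). A minimal sketch of that layout arithmetic, using a hypothetical `Header` type and today's stable `Layout` API (where `pad_to_align` is infallible, so the extra `.unwrap()` from the patch is not needed):

```
use std::alloc::Layout;

// Hypothetical stand-in for the RcBox / ArcInner header (two reference counts).
#[allow(dead_code)]
struct Header {
    strong: usize,
    weak: usize,
}

fn box_layout_for(value_layout: Layout) -> Layout {
    Layout::new::<Header>()
        .extend(value_layout) // header first, then the value, padding in between
        .unwrap()
        .0                    // .1 would be the value's offset in the allocation
        .pad_to_align()       // round the total size up to the overall alignment
}

fn main() {
    let l = box_layout_for(Layout::new::<u8>());
    assert_eq!(l.align(), std::mem::align_of::<Header>());
    assert_eq!(l.size(), std::mem::size_of::<Header>() + std::mem::align_of::<Header>());
}
```

The `debug_assert_eq!` added after each allocation checks that this computed layout agrees with `Layout::for_value` on the initialised box.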
#![feature(allocator_api)] -#![feature(alloc_system)] #![feature(box_syntax)] -#![cfg_attr(stage0, feature(min_const_fn))] #![feature(drain_filter)] #![feature(exact_size_is_empty)] #![feature(pattern)] @@ -21,7 +19,6 @@ #![feature(unboxed_closures)] #![feature(repeat_generic_slice)] -extern crate alloc_system; extern crate core; extern crate rand; diff --git a/src/liballoc/vec.rs b/src/liballoc/vec.rs index f7a0bbdcea..ca7c766e41 100644 --- a/src/liballoc/vec.rs +++ b/src/liballoc/vec.rs @@ -613,7 +613,7 @@ impl Vec { /// vec.shrink_to(0); /// assert!(vec.capacity() >= 3); /// ``` - #[unstable(feature = "shrink_to", reason = "new API", issue="0")] + #[unstable(feature = "shrink_to", reason = "new API", issue="56431")] pub fn shrink_to(&mut self, min_capacity: usize) { self.buf.shrink_to_fit(cmp::max(self.len, min_capacity)); } diff --git a/src/liballoc_jemalloc/Cargo.toml b/src/liballoc_jemalloc/Cargo.toml deleted file mode 100644 index 7986d5dd2e..0000000000 --- a/src/liballoc_jemalloc/Cargo.toml +++ /dev/null @@ -1,24 +0,0 @@ -[package] -authors = ["The Rust Project Developers"] -name = "alloc_jemalloc" -version = "0.0.0" -build = "build.rs" -links = "jemalloc" - -[lib] -name = "alloc_jemalloc" -path = "lib.rs" -test = false -doc = false - -[dependencies] -core = { path = "../libcore" } -libc = { path = "../rustc/libc_shim" } -compiler_builtins = { path = "../rustc/compiler_builtins_shim" } - -[build-dependencies] -build_helper = { path = "../build_helper" } -cc = "1.0.1" - -[features] -debug = [] diff --git a/src/liballoc_jemalloc/build.rs b/src/liballoc_jemalloc/build.rs deleted file mode 100644 index fbda425a70..0000000000 --- a/src/liballoc_jemalloc/build.rs +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -#![deny(warnings)] - -extern crate build_helper; -extern crate cc; - -use std::env; -use std::path::PathBuf; -use std::process::Command; -use build_helper::{run, native_lib_boilerplate}; - -fn main() { - // FIXME: This is a hack to support building targets that don't - // support jemalloc alongside hosts that do. The jemalloc build is - // controlled by a feature of the std crate, and if that feature - // changes between targets, it invalidates the fingerprint of - // std's build script (this is a cargo bug); so we must ensure - // that the feature set used by std is the same across all - // targets, which means we have to build the alloc_jemalloc crate - // for targets like emscripten, even if we don't use it. - let target = env::var("TARGET").expect("TARGET was not set"); - let host = env::var("HOST").expect("HOST was not set"); - if target.contains("bitrig") || target.contains("emscripten") || target.contains("fuchsia") || - target.contains("msvc") || target.contains("openbsd") || target.contains("redox") || - target.contains("rumprun") || target.contains("wasm32") { - println!("cargo:rustc-cfg=dummy_jemalloc"); - return; - } - - // CloudABI ships with a copy of jemalloc that has been patched to - // work well with sandboxing. Don't attempt to build our own copy, - // as it won't build. 
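The deleted jemalloc build script signals its "do nothing" mode to the crate through a custom `dummy_jemalloc` cfg rather than a Cargo feature. A minimal sketch of that pattern (abbreviated target list; the real script checks many more targets):

```
// build.rs: pick the "dummy" mode at build time and tell rustc via a custom cfg.
fn main() {
    let target = std::env::var("TARGET").expect("TARGET was not set");
    if target.contains("wasm32") || target.contains("msvc") {
        println!("cargo:rustc-cfg=dummy_jemalloc");
    }
}
```

`liballoc_jemalloc/lib.rs` further down then gates its real contents with `#[cfg(not(dummy_jemalloc))]`.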
- if target.contains("cloudabi") { - return; - } - - if target.contains("android") { - println!("cargo:rustc-link-lib=gcc"); - } else if !target.contains("windows") && !target.contains("musl") { - println!("cargo:rustc-link-lib=pthread"); - } - - if let Some(jemalloc) = env::var_os("JEMALLOC_OVERRIDE") { - let jemalloc = PathBuf::from(jemalloc); - println!("cargo:rustc-link-search=native={}", - jemalloc.parent().unwrap().display()); - let stem = jemalloc.file_stem().unwrap().to_str().unwrap(); - let name = jemalloc.file_name().unwrap().to_str().unwrap(); - let kind = if name.ends_with(".a") { - "static" - } else { - "dylib" - }; - println!("cargo:rustc-link-lib={}={}", kind, &stem[3..]); - return; - } - - let link_name = if target.contains("windows") { "jemalloc" } else { "jemalloc_pic" }; - let native = match native_lib_boilerplate("jemalloc", "jemalloc", link_name, "lib") { - Ok(native) => native, - _ => return, - }; - - let mut cmd = Command::new("sh"); - cmd.arg(native.src_dir.join("configure") - .to_str() - .unwrap() - .replace("C:\\", "/c/") - .replace("\\", "/")) - .current_dir(&native.out_dir) - // jemalloc generates Makefile deps using GCC's "-MM" flag. This means - // that GCC will run the preprocessor, and only the preprocessor, over - // jemalloc's source files. If we don't specify CPPFLAGS, then at least - // on ARM that step fails with a "Missing implementation for 32-bit - // atomic operations" error. This is because no "-march" flag will be - // passed to GCC, and then GCC won't define the - // "__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4" macro that jemalloc needs to - // select an atomic operation implementation. - .env("CPPFLAGS", env::var_os("CFLAGS").unwrap_or_default()); - - if target.contains("ios") { - cmd.arg("--disable-tls"); - } else if target.contains("android") { - // We force android to have prefixed symbols because apparently - // replacement of the libc allocator doesn't quite work. When this was - // tested (unprefixed symbols), it was found that the `realpath` - // function in libc would allocate with libc malloc (not jemalloc - // malloc), and then the standard library would free with jemalloc free, - // causing a segfault. - // - // If the test suite passes, however, without symbol prefixes then we - // should be good to go! - cmd.arg("--with-jemalloc-prefix=je_"); - cmd.arg("--disable-tls"); - } else if target.contains("dragonfly") || target.contains("musl") { - cmd.arg("--with-jemalloc-prefix=je_"); - } - - if cfg!(feature = "debug") { - // Enable jemalloc assertions. - cmd.arg("--enable-debug"); - } - - cmd.arg(format!("--host={}", build_helper::gnu_target(&target))); - cmd.arg(format!("--build={}", build_helper::gnu_target(&host))); - - // for some reason, jemalloc configure doesn't detect this value - // automatically for this target - if target == "sparc64-unknown-linux-gnu" { - cmd.arg("--with-lg-quantum=4"); - } - - run(&mut cmd); - - let mut make = Command::new(build_helper::make(&host)); - make.current_dir(&native.out_dir) - .arg("build_lib_static"); - - // These are intended for mingw32-make which we don't use - if cfg!(windows) { - make.env_remove("MAKEFLAGS").env_remove("MFLAGS"); - } - - // mingw make seems... buggy? unclear... - if !host.contains("windows") { - make.arg("-j") - .arg(env::var("NUM_JOBS").expect("NUM_JOBS was not set")); - } - - run(&mut make); - - // The pthread_atfork symbols is used by jemalloc on android but the really - // old android we're building on doesn't have them defined, so just make - // sure the symbols are available. 
- if target.contains("androideabi") { - println!("cargo:rerun-if-changed=pthread_atfork_dummy.c"); - cc::Build::new() - .flag("-fvisibility=hidden") - .file("pthread_atfork_dummy.c") - .compile("pthread_atfork_dummy"); - } -} diff --git a/src/liballoc_jemalloc/lib.rs b/src/liballoc_jemalloc/lib.rs deleted file mode 100644 index 0065e84a7a..0000000000 --- a/src/liballoc_jemalloc/lib.rs +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -#![no_std] -#![allow(unused_attributes)] -#![unstable(feature = "alloc_jemalloc", - reason = "implementation detail of std, does not provide any public API", - issue = "0")] -#![feature(core_intrinsics)] -#![feature(libc)] -#![feature(linkage)] -#![feature(nll)] -#![feature(staged_api)] -#![feature(rustc_attrs)] -#![cfg_attr(dummy_jemalloc, allow(dead_code, unused_extern_crates))] -#![cfg_attr(not(dummy_jemalloc), feature(allocator_api))] -#![rustc_alloc_kind = "exe"] - -extern crate libc; - -#[cfg(not(dummy_jemalloc))] -pub use contents::*; -#[cfg(not(dummy_jemalloc))] -mod contents { - use libc::{c_int, c_void, size_t}; - - // Note that the symbols here are prefixed by default on macOS and Windows (we - // don't explicitly request it), and on Android and DragonFly we explicitly - // request it as unprefixing cause segfaults (mismatches in allocators). - extern "C" { - #[cfg_attr(any(target_os = "macos", target_os = "android", target_os = "ios", - target_os = "dragonfly", target_os = "windows", target_env = "musl"), - link_name = "je_mallocx")] - fn mallocx(size: size_t, flags: c_int) -> *mut c_void; - #[cfg_attr(any(target_os = "macos", target_os = "android", target_os = "ios", - target_os = "dragonfly", target_os = "windows", target_env = "musl"), - link_name = "je_calloc")] - fn calloc(size: size_t, flags: c_int) -> *mut c_void; - #[cfg_attr(any(target_os = "macos", target_os = "android", target_os = "ios", - target_os = "dragonfly", target_os = "windows", target_env = "musl"), - link_name = "je_rallocx")] - fn rallocx(ptr: *mut c_void, size: size_t, flags: c_int) -> *mut c_void; - #[cfg_attr(any(target_os = "macos", target_os = "android", target_os = "ios", - target_os = "dragonfly", target_os = "windows", target_env = "musl"), - link_name = "je_sdallocx")] - fn sdallocx(ptr: *mut c_void, size: size_t, flags: c_int); - } - - const MALLOCX_ZERO: c_int = 0x40; - - // The minimum alignment guaranteed by the architecture. This value is used to - // add fast paths for low alignment values. 
- #[cfg(all(any(target_arch = "arm", - target_arch = "mips", - target_arch = "powerpc")))] - const MIN_ALIGN: usize = 8; - #[cfg(all(any(target_arch = "x86", - target_arch = "x86_64", - target_arch = "aarch64", - target_arch = "powerpc64", - target_arch = "mips64", - target_arch = "s390x", - target_arch = "sparc64")))] - const MIN_ALIGN: usize = 16; - - // MALLOCX_ALIGN(a) macro - fn mallocx_align(a: usize) -> c_int { - a.trailing_zeros() as c_int - } - - fn align_to_flags(align: usize, size: usize) -> c_int { - if align <= MIN_ALIGN && align <= size { - 0 - } else { - mallocx_align(align) - } - } - - // for symbol names src/librustc/middle/allocator.rs - // for signatures src/librustc_allocator/lib.rs - - // linkage directives are provided as part of the current compiler allocator - // ABI - - #[rustc_std_internal_symbol] - pub unsafe extern fn __rde_alloc(size: usize, align: usize) -> *mut u8 { - let flags = align_to_flags(align, size); - let ptr = mallocx(size as size_t, flags) as *mut u8; - ptr - } - - #[rustc_std_internal_symbol] - pub unsafe extern fn __rde_dealloc(ptr: *mut u8, - size: usize, - align: usize) { - let flags = align_to_flags(align, size); - sdallocx(ptr as *mut c_void, size, flags); - } - - #[rustc_std_internal_symbol] - pub unsafe extern fn __rde_realloc(ptr: *mut u8, - _old_size: usize, - align: usize, - new_size: usize) -> *mut u8 { - let flags = align_to_flags(align, new_size); - let ptr = rallocx(ptr as *mut c_void, new_size, flags) as *mut u8; - ptr - } - - #[rustc_std_internal_symbol] - pub unsafe extern fn __rde_alloc_zeroed(size: usize, align: usize) -> *mut u8 { - let ptr = if align <= MIN_ALIGN && align <= size { - calloc(size as size_t, 1) as *mut u8 - } else { - let flags = align_to_flags(align, size) | MALLOCX_ZERO; - mallocx(size as size_t, flags) as *mut u8 - }; - ptr - } -} diff --git a/src/liballoc_system/Cargo.toml b/src/liballoc_system/Cargo.toml deleted file mode 100644 index c34e2f203a..0000000000 --- a/src/liballoc_system/Cargo.toml +++ /dev/null @@ -1,19 +0,0 @@ -[package] -authors = ["The Rust Project Developers"] -name = "alloc_system" -version = "0.0.0" - -[lib] -name = "alloc_system" -path = "lib.rs" -test = false -doc = false - -[dependencies] -core = { path = "../libcore" } -libc = { path = "../rustc/libc_shim" } -compiler_builtins = { path = "../rustc/compiler_builtins_shim" } - -# See comments in the source for what this dependency is -[target.'cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))'.dependencies] -dlmalloc = { path = "../rustc/dlmalloc_shim" } diff --git a/src/liballoc_system/lib.rs b/src/liballoc_system/lib.rs deleted file mode 100644 index 15283036bb..0000000000 --- a/src/liballoc_system/lib.rs +++ /dev/null @@ -1,410 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
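The removed shim above only passes an explicit `MALLOCX_ALIGN` flag to jemalloc when the default alignment is not already sufficient. A self-contained restatement of that decision, assuming the x86_64 `MIN_ALIGN` of 16 from the constants above:

```
const MIN_ALIGN: usize = 16; // assumption: the x86_64 value from the constants above

// MALLOCX_ALIGN(a): jemalloc encodes a power-of-two alignment as its log2.
fn mallocx_align(a: usize) -> i32 {
    a.trailing_zeros() as i32
}

fn align_to_flags(align: usize, size: usize) -> i32 {
    if align <= MIN_ALIGN && align <= size {
        0 // the default alignment already satisfies the request
    } else {
        mallocx_align(align)
    }
}

fn main() {
    assert_eq!(align_to_flags(8, 64), 0);
    assert_eq!(align_to_flags(64, 64), 6); // 2^6 = 64
}
```

The same `align <= MIN_ALIGN && align <= size` fast path reappears in the system allocator below, where it selects plain `malloc`/`calloc` over the aligned variants.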
- -#![no_std] -#![allow(unused_attributes)] -#![unstable(feature = "alloc_system", - reason = "this library is unlikely to be stabilized in its current \ - form or name", - issue = "32838")] - -#![feature(allocator_api)] -#![feature(core_intrinsics)] -#![feature(nll)] -#![feature(staged_api)] -#![feature(rustc_attrs)] -#![cfg_attr( - all(target_arch = "wasm32", not(target_os = "emscripten")), - feature(integer_atomics, stdsimd) -)] -#![cfg_attr(any(unix, target_os = "cloudabi", target_os = "redox"), feature(libc))] -#![rustc_alloc_kind = "lib"] - -// The minimum alignment guaranteed by the architecture. This value is used to -// add fast paths for low alignment values. -#[cfg(all(any(target_arch = "x86", - target_arch = "arm", - target_arch = "mips", - target_arch = "powerpc", - target_arch = "powerpc64", - target_arch = "asmjs", - target_arch = "wasm32")))] -#[allow(dead_code)] -const MIN_ALIGN: usize = 8; -#[cfg(all(any(target_arch = "x86_64", - target_arch = "aarch64", - target_arch = "mips64", - target_arch = "s390x", - target_arch = "sparc64")))] -#[allow(dead_code)] -const MIN_ALIGN: usize = 16; - -use core::alloc::{Alloc, GlobalAlloc, AllocErr, Layout}; -use core::ptr::NonNull; - -/// The default memory allocator provided by the operating system. -/// -/// This is based on `malloc` on Unix platforms and `HeapAlloc` on Windows, -/// plus related functions. -/// -/// This type can be used in a `static` item -/// with the `#[global_allocator]` attribute -/// to force the global allocator to be the system’s one. -/// (The default is jemalloc for executables, on some platforms.) -/// -/// ```rust -/// use std::alloc::System; -/// -/// #[global_allocator] -/// static A: System = System; -/// -/// fn main() { -/// let a = Box::new(4); // Allocates from the system allocator. -/// println!("{}", a); -/// } -/// ``` -/// -/// It can also be used directly to allocate memory -/// independently of the standard library’s global allocator. 
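Using `System` directly, independently of the global allocator, goes through the `GlobalAlloc` impl that follows. A minimal sketch with today's `std::alloc` re-exports; every `alloc` must be paired with a `dealloc` using the same layout:

```
use std::alloc::{GlobalAlloc, Layout, System};

fn main() {
    // Allocate 64 bytes aligned to 16 straight from the system allocator,
    // fill them, and hand them back with the same layout.
    let layout = Layout::from_size_align(64, 16).unwrap();
    unsafe {
        let ptr = System.alloc(layout);
        assert!(!ptr.is_null());
        ptr.write_bytes(0xAB, layout.size());
        System.dealloc(ptr, layout);
    }
}
```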
-#[stable(feature = "alloc_system_type", since = "1.28.0")] -pub struct System; - -#[unstable(feature = "allocator_api", issue = "32838")] -unsafe impl Alloc for System { - #[inline] - unsafe fn alloc(&mut self, layout: Layout) -> Result, AllocErr> { - NonNull::new(GlobalAlloc::alloc(self, layout)).ok_or(AllocErr) - } - - #[inline] - unsafe fn alloc_zeroed(&mut self, layout: Layout) -> Result, AllocErr> { - NonNull::new(GlobalAlloc::alloc_zeroed(self, layout)).ok_or(AllocErr) - } - - #[inline] - unsafe fn dealloc(&mut self, ptr: NonNull, layout: Layout) { - GlobalAlloc::dealloc(self, ptr.as_ptr(), layout) - } - - #[inline] - unsafe fn realloc(&mut self, - ptr: NonNull, - layout: Layout, - new_size: usize) -> Result, AllocErr> { - NonNull::new(GlobalAlloc::realloc(self, ptr.as_ptr(), layout, new_size)).ok_or(AllocErr) - } -} - -#[cfg(any(windows, unix, target_os = "cloudabi", target_os = "redox"))] -mod realloc_fallback { - use core::alloc::{GlobalAlloc, Layout}; - use core::cmp; - use core::ptr; - - impl super::System { - pub(crate) unsafe fn realloc_fallback(&self, ptr: *mut u8, old_layout: Layout, - new_size: usize) -> *mut u8 { - // Docs for GlobalAlloc::realloc require this to be valid: - let new_layout = Layout::from_size_align_unchecked(new_size, old_layout.align()); - - let new_ptr = GlobalAlloc::alloc(self, new_layout); - if !new_ptr.is_null() { - let size = cmp::min(old_layout.size(), new_size); - ptr::copy_nonoverlapping(ptr, new_ptr, size); - GlobalAlloc::dealloc(self, ptr, old_layout); - } - new_ptr - } - } -} - -#[cfg(any(unix, target_os = "cloudabi", target_os = "redox"))] -mod platform { - extern crate libc; - - use core::ptr; - - use MIN_ALIGN; - use System; - use core::alloc::{GlobalAlloc, Layout}; - - #[stable(feature = "alloc_system_type", since = "1.28.0")] - unsafe impl GlobalAlloc for System { - #[inline] - unsafe fn alloc(&self, layout: Layout) -> *mut u8 { - if layout.align() <= MIN_ALIGN && layout.align() <= layout.size() { - libc::malloc(layout.size()) as *mut u8 - } else { - #[cfg(target_os = "macos")] - { - if layout.align() > (1 << 31) { - return ptr::null_mut() - } - } - aligned_malloc(&layout) - } - } - - #[inline] - unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 { - if layout.align() <= MIN_ALIGN && layout.align() <= layout.size() { - libc::calloc(layout.size(), 1) as *mut u8 - } else { - let ptr = self.alloc(layout.clone()); - if !ptr.is_null() { - ptr::write_bytes(ptr, 0, layout.size()); - } - ptr - } - } - - #[inline] - unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) { - libc::free(ptr as *mut libc::c_void) - } - - #[inline] - unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 { - if layout.align() <= MIN_ALIGN && layout.align() <= new_size { - libc::realloc(ptr as *mut libc::c_void, new_size) as *mut u8 - } else { - self.realloc_fallback(ptr, layout, new_size) - } - } - } - - #[cfg(any(target_os = "android", - target_os = "hermit", - target_os = "redox", - target_os = "solaris"))] - #[inline] - unsafe fn aligned_malloc(layout: &Layout) -> *mut u8 { - // On android we currently target API level 9 which unfortunately - // doesn't have the `posix_memalign` API used below. Instead we use - // `memalign`, but this unfortunately has the property on some systems - // where the memory returned cannot be deallocated by `free`! - // - // Upon closer inspection, however, this appears to work just fine with - // Android, so for this platform we should be fine to call `memalign` - // (which is present in API level 9). 
Some helpful references could - // possibly be chromium using memalign [1], attempts at documenting that - // memalign + free is ok [2] [3], or the current source of chromium - // which still uses memalign on android [4]. - // - // [1]: https://codereview.chromium.org/10796020/ - // [2]: https://code.google.com/p/android/issues/detail?id=35391 - // [3]: https://bugs.chromium.org/p/chromium/issues/detail?id=138579 - // [4]: https://chromium.googlesource.com/chromium/src/base/+/master/ - // /memory/aligned_memory.cc - libc::memalign(layout.align(), layout.size()) as *mut u8 - } - - #[cfg(not(any(target_os = "android", - target_os = "hermit", - target_os = "redox", - target_os = "solaris")))] - #[inline] - unsafe fn aligned_malloc(layout: &Layout) -> *mut u8 { - let mut out = ptr::null_mut(); - let ret = libc::posix_memalign(&mut out, layout.align(), layout.size()); - if ret != 0 { - ptr::null_mut() - } else { - out as *mut u8 - } - } -} - -#[cfg(windows)] -#[allow(nonstandard_style)] -mod platform { - use MIN_ALIGN; - use System; - use core::alloc::{GlobalAlloc, Layout}; - - type LPVOID = *mut u8; - type HANDLE = LPVOID; - type SIZE_T = usize; - type DWORD = u32; - type BOOL = i32; - - extern "system" { - fn GetProcessHeap() -> HANDLE; - fn HeapAlloc(hHeap: HANDLE, dwFlags: DWORD, dwBytes: SIZE_T) -> LPVOID; - fn HeapReAlloc(hHeap: HANDLE, dwFlags: DWORD, lpMem: LPVOID, dwBytes: SIZE_T) -> LPVOID; - fn HeapFree(hHeap: HANDLE, dwFlags: DWORD, lpMem: LPVOID) -> BOOL; - fn GetLastError() -> DWORD; - } - - #[repr(C)] - struct Header(*mut u8); - - const HEAP_ZERO_MEMORY: DWORD = 0x00000008; - - unsafe fn get_header<'a>(ptr: *mut u8) -> &'a mut Header { - &mut *(ptr as *mut Header).offset(-1) - } - - unsafe fn align_ptr(ptr: *mut u8, align: usize) -> *mut u8 { - let aligned = ptr.add(align - (ptr as usize & (align - 1))); - *get_header(aligned) = Header(ptr); - aligned - } - - #[inline] - unsafe fn allocate_with_flags(layout: Layout, flags: DWORD) -> *mut u8 { - let ptr = if layout.align() <= MIN_ALIGN { - HeapAlloc(GetProcessHeap(), flags, layout.size()) - } else { - let size = layout.size() + layout.align(); - let ptr = HeapAlloc(GetProcessHeap(), flags, size); - if ptr.is_null() { - ptr - } else { - align_ptr(ptr, layout.align()) - } - }; - ptr as *mut u8 - } - - #[stable(feature = "alloc_system_type", since = "1.28.0")] - unsafe impl GlobalAlloc for System { - #[inline] - unsafe fn alloc(&self, layout: Layout) -> *mut u8 { - allocate_with_flags(layout, 0) - } - - #[inline] - unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 { - allocate_with_flags(layout, HEAP_ZERO_MEMORY) - } - - #[inline] - unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { - if layout.align() <= MIN_ALIGN { - let err = HeapFree(GetProcessHeap(), 0, ptr as LPVOID); - debug_assert!(err != 0, "Failed to free heap memory: {}", - GetLastError()); - } else { - let header = get_header(ptr); - let err = HeapFree(GetProcessHeap(), 0, header.0 as LPVOID); - debug_assert!(err != 0, "Failed to free heap memory: {}", - GetLastError()); - } - } - - #[inline] - unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 { - if layout.align() <= MIN_ALIGN { - HeapReAlloc(GetProcessHeap(), 0, ptr as LPVOID, new_size) as *mut u8 - } else { - self.realloc_fallback(ptr, layout, new_size) - } - } - } -} - -// This is an implementation of a global allocator on the wasm32 platform when -// emscripten is not in use. 
In that situation there's no actual runtime for us -// to lean on for allocation, so instead we provide our own! -// -// The wasm32 instruction set has two instructions for getting the current -// amount of memory and growing the amount of memory. These instructions are the -// foundation on which we're able to build an allocator, so we do so! Note that -// the instructions are also pretty "global" and this is the "global" allocator -// after all! -// -// The current allocator here is the `dlmalloc` crate which we've got included -// in the rust-lang/rust repository as a submodule. The crate is a port of -// dlmalloc.c from C to Rust and is basically just so we can have "pure Rust" -// for now which is currently technically required (can't link with C yet). -// -// The crate itself provides a global allocator which on wasm has no -// synchronization as there are no threads! -#[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))] -mod platform { - extern crate dlmalloc; - - use core::alloc::{GlobalAlloc, Layout}; - use System; - - static mut DLMALLOC: dlmalloc::Dlmalloc = dlmalloc::DLMALLOC_INIT; - - #[stable(feature = "alloc_system_type", since = "1.28.0")] - unsafe impl GlobalAlloc for System { - #[inline] - unsafe fn alloc(&self, layout: Layout) -> *mut u8 { - let _lock = lock::lock(); - DLMALLOC.malloc(layout.size(), layout.align()) - } - - #[inline] - unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 { - let _lock = lock::lock(); - DLMALLOC.calloc(layout.size(), layout.align()) - } - - #[inline] - unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { - let _lock = lock::lock(); - DLMALLOC.free(ptr, layout.size(), layout.align()) - } - - #[inline] - unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 { - let _lock = lock::lock(); - DLMALLOC.realloc(ptr, layout.size(), layout.align(), new_size) - } - } - - #[cfg(target_feature = "atomics")] - mod lock { - use core::arch::wasm32; - use core::sync::atomic::{AtomicI32, Ordering::SeqCst}; - - static LOCKED: AtomicI32 = AtomicI32::new(0); - - pub struct DropLock; - - pub fn lock() -> DropLock { - loop { - if LOCKED.swap(1, SeqCst) == 0 { - return DropLock - } - unsafe { - let r = wasm32::atomic::wait_i32( - &LOCKED as *const AtomicI32 as *mut i32, - 1, // expected value - -1, // timeout - ); - debug_assert!(r == 0 || r == 1); - } - } - } - - impl Drop for DropLock { - fn drop(&mut self) { - let r = LOCKED.swap(0, SeqCst); - debug_assert_eq!(r, 1); - unsafe { - wasm32::atomic::wake( - &LOCKED as *const AtomicI32 as *mut i32, - 1, // only one thread - ); - } - } - } - } - - #[cfg(not(target_feature = "atomics"))] - mod lock { - pub fn lock() {} // no atomics, no threads, that's easy! - } -} diff --git a/src/libarena/lib.rs b/src/libarena/lib.rs index dae05a368f..aef3edd9eb 100644 --- a/src/libarena/lib.rs +++ b/src/libarena/lib.rs @@ -224,14 +224,14 @@ impl TypedArena { unsafe { // Clear the last chunk, which is partially filled. let mut chunks_borrow = self.chunks.borrow_mut(); - if let Some(mut last_chunk) = chunks_borrow.pop() { + if let Some(mut last_chunk) = chunks_borrow.last_mut() { self.clear_last_chunk(&mut last_chunk); + let len = chunks_borrow.len(); // If `T` is ZST, code below has no effect. - for mut chunk in chunks_borrow.drain(..) 
{ + for mut chunk in chunks_borrow.drain(..len-1) { let cap = chunk.storage.cap(); chunk.destroy(cap); } - chunks_borrow.push(last_chunk); } } } @@ -298,6 +298,7 @@ pub struct DroplessArena { unsafe impl Send for DroplessArena {} impl Default for DroplessArena { + #[inline] fn default() -> DroplessArena { DroplessArena { ptr: Cell::new(0 as *mut u8), @@ -310,15 +311,11 @@ impl Default for DroplessArena { impl DroplessArena { pub fn in_arena(&self, ptr: *const T) -> bool { let ptr = ptr as *const u8 as *mut u8; - for chunk in &*self.chunks.borrow() { - if chunk.start() <= ptr && ptr < chunk.end() { - return true; - } - } - false + self.chunks.borrow().iter().any(|chunk| chunk.start() <= ptr && ptr < chunk.end()) } + #[inline] fn align(&self, align: usize) { let final_address = ((self.ptr.get() as usize) + align - 1) & !(align - 1); self.ptr.set(final_address as *mut u8); @@ -408,7 +405,7 @@ impl DroplessArena { { assert!(!mem::needs_drop::()); assert!(mem::size_of::() != 0); - assert!(slice.len() != 0); + assert!(!slice.is_empty()); let mem = self.alloc_raw( slice.len() * mem::size_of::(), @@ -604,6 +601,15 @@ mod tests { } } + #[bench] + pub fn bench_typed_arena_clear(b: &mut Bencher) { + let mut arena = TypedArena::default(); + b.iter(|| { + arena.alloc(Point { x: 1, y: 2, z: 3 }); + arena.clear(); + }) + } + // Drop tests struct DropCounter<'a> { diff --git a/src/libcompiler_builtins/.travis.yml b/src/libcompiler_builtins/.travis.yml index 7b7ed7e6bc..9b4c28419f 100644 --- a/src/libcompiler_builtins/.travis.yml +++ b/src/libcompiler_builtins/.travis.yml @@ -11,7 +11,7 @@ matrix: - env: TARGET=arm-unknown-linux-gnueabihf - env: TARGET=armv7-unknown-linux-gnueabihf - env: TARGET=i586-unknown-linux-gnu - - env: TARGET=i686-apple-darwin + - env: TARGET=i686-apple-darwin DEBUG_LTO_BUILD_DOESNT_WORK=1 os: osx - env: TARGET=i686-unknown-linux-gnu - env: TARGET=mips-unknown-linux-gnu @@ -28,7 +28,7 @@ matrix: - env: TARGET=wasm32-unknown-unknown install: rustup target add $TARGET script: cargo build --target $TARGET - - env: TARGET=x86_64-apple-darwin + - env: TARGET=x86_64-apple-darwin DEBUG_LTO_BUILD_DOESNT_WORK=1 os: osx - env: TARGET=x86_64-unknown-linux-gnu allow_failures: diff --git a/src/libcompiler_builtins/build.rs b/src/libcompiler_builtins/build.rs index cdcbfe24cd..6f2cc76a96 100644 --- a/src/libcompiler_builtins/build.rs +++ b/src/libcompiler_builtins/build.rs @@ -110,9 +110,6 @@ mod c { let target_env = env::var("CARGO_CFG_TARGET_ENV").unwrap(); let target_os = env::var("CARGO_CFG_TARGET_OS").unwrap(); let target_vendor = env::var("CARGO_CFG_TARGET_VENDOR").unwrap(); - let target_arch_arm = - target_arch.contains("arm") || - target_arch.contains("thumb"); let cfg = &mut cc::Build::new(); cfg.warnings(false); @@ -141,29 +138,6 @@ mod c { cfg.define("VISIBILITY_HIDDEN", None); } - // NOTE Most of the ARM intrinsics are written in assembly. Tell gcc which arch we are going - // to target to make sure that the assembly implementations really work for the target. If - // the implementation is not valid for the arch, then gcc will error when compiling it. 
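The `DroplessArena::align` method in the arena hunk above rounds the bump pointer up to the requested alignment with the standard power-of-two mask trick; the arithmetic in isolation:

```
// `align` must be a power of two; round `addr` up to the next multiple of it.
fn align_up(addr: usize, align: usize) -> usize {
    debug_assert!(align.is_power_of_two());
    (addr + align - 1) & !(align - 1)
}

fn main() {
    assert_eq!(align_up(13, 8), 16);
    assert_eq!(align_up(16, 8), 16);
    assert_eq!(align_up(0, 8), 0);
}
```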
- if llvm_target[0].starts_with("thumb") { - cfg.flag("-mthumb"); - } - - if target_arch_arm && llvm_target.last() == Some(&"eabihf") { - cfg.flag("-mfloat-abi=hard"); - } - - if llvm_target[0] == "thumbv6m" { - cfg.flag("-march=armv6-m"); - } - - if llvm_target[0] == "thumbv7m" { - cfg.flag("-march=armv7-m"); - } - - if llvm_target[0] == "thumbv7em" { - cfg.flag("-march=armv7e-m"); - } - let mut sources = Sources::new(); sources.extend( &[ @@ -299,7 +273,7 @@ mod c { } } - if target_arch == "arm" && target_os != "ios" { + if target_arch == "arm" && target_os != "ios" && target_env != "msvc" { sources.extend( &[ "arm/aeabi_div0.c", diff --git a/src/libcompiler_builtins/compiler-rt/CMakeLists.txt b/src/libcompiler_builtins/compiler-rt/CMakeLists.txt index 193342c1c6..4697dba624 100644 --- a/src/libcompiler_builtins/compiler-rt/CMakeLists.txt +++ b/src/libcompiler_builtins/compiler-rt/CMakeLists.txt @@ -82,6 +82,61 @@ if (COMPILER_RT_STANDALONE_BUILD) or specify the PYTHON_EXECUTABLE CMake variable.") endif() + # Ensure that fat libraries are built correctly on Darwin + if(CMAKE_SYSTEM_NAME STREQUAL Darwin) + if(NOT CMAKE_LIBTOOL) + find_program(CMAKE_XCRUN + NAMES + xcrun) + if(CMAKE_XCRUN) + execute_process(COMMAND + ${CMAKE_XCRUN} -find libtool + OUTPUT_VARIABLE + CMAKE_LIBTOOL + OUTPUT_STRIP_TRAILING_WHITESPACE) + endif() + + if(NOT CMAKE_LIBTOOL OR NOT EXISTS CMAKE_LIBTOOL) + find_program(CMAKE_LIBTOOL + NAMES + libtool) + endif() + endif() + + get_property(languages GLOBAL PROPERTY ENABLED_LANGUAGES) + + if(CMAKE_LIBTOOL) + set(CMAKE_LIBTOOL ${CMAKE_LIBTOOL} CACHE PATH "libtool executable") + message(STATUS "Found libtool - ${CMAKE_LIBTOOL}") + + execute_process(COMMAND + ${CMAKE_LIBTOOL} -V + OUTPUT_VARIABLE + LIBTOOL_V_OUTPUT + OUTPUT_STRIP_TRAILING_WHITESPACE) + if("${LIBTOOL_V_OUTPUT}" MATCHES ".*cctools-([0-9]+).*") + string(REGEX REPLACE ".*cctools-([0-9]+).*" "\\1" LIBTOOL_VERSION ${LIBTOOL_V_OUTPUT}) + if(NOT LIBTOOL_VERSION VERSION_LESS "862") + set(LIBTOOL_NO_WARNING_FLAG "-no_warning_for_no_symbols") + endif() + endif() + + foreach(lang ${languages}) + set(CMAKE_${lang}_CREATE_STATIC_LIBRARY "\"${CMAKE_LIBTOOL}\" -static ${LIBTOOL_NO_WARNING_FLAG} -o ") + endforeach() + endif() + + # Workaround SIP :-( + if(DYLD_LIBRARY_PATH) + set(dyld_envar "DYLD_LIBRARY_PATH=${DYLD_LIBRARY_PATH}") + foreach(lang ${languages}) + foreach(cmd ${CMAKE_${lang}_CREATE_STATIC_LIBRARY}) + list(APPEND CMAKE_${lang}_CREATE_STATIC_LIBRARY_NEW "${dyld_envar} ${cmd}") + endforeach() + endforeach() + endif() + endif() + # Define default arguments to lit. set(LIT_ARGS_DEFAULT "-sv") if (MSVC OR XCODE) @@ -313,11 +368,14 @@ append_list_if(COMPILER_RT_HAS_WD4391_FLAG /wd4391 SANITIZER_COMMON_CFLAGS) append_list_if(COMPILER_RT_HAS_WD4722_FLAG /wd4722 SANITIZER_COMMON_CFLAGS) append_list_if(COMPILER_RT_HAS_WD4800_FLAG /wd4800 SANITIZER_COMMON_CFLAGS) +append_list_if(MINGW -fms-extensions SANITIZER_COMMON_CFLAGS) + # Set common link flags. append_list_if(COMPILER_RT_HAS_NODEFAULTLIBS_FLAG -nodefaultlibs SANITIZER_COMMON_LINK_FLAGS) if (COMPILER_RT_USE_BUILTINS_LIBRARY) list(APPEND SANITIZER_COMMON_LINK_LIBS ${COMPILER_RT_BUILTINS_LIBRARY}) + string(REPLACE "-Wl,-z,defs" "" CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS}") else() if (ANDROID) append_list_if(COMPILER_RT_HAS_GCC_LIB gcc SANITIZER_COMMON_LINK_LIBS) @@ -328,14 +386,6 @@ endif() append_list_if(COMPILER_RT_HAS_LIBC c SANITIZER_COMMON_LINK_LIBS) -if(ANDROID) -# Put the Sanitizer shared libraries in the global group. 
For more details, see -# android-changes-for-ndk-developers.md#changes-to-library-search-order - if (COMPILER_RT_HAS_Z_GLOBAL) - list(APPEND SANITIZER_COMMON_LINK_FLAGS -Wl,-z,global) - endif() -endif() - if("${CMAKE_SYSTEM_NAME}" STREQUAL "Fuchsia") list(APPEND SANITIZER_COMMON_LINK_FLAGS -Wl,-z,defs,-z,now,-z,relro) list(APPEND SANITIZER_COMMON_LINK_LIBS zircon) diff --git a/src/libcompiler_builtins/compiler-rt/lib/builtins/arm/addsf3.S b/src/libcompiler_builtins/compiler-rt/lib/builtins/arm/addsf3.S index 362b5c147e..74723cbeff 100644 --- a/src/libcompiler_builtins/compiler-rt/lib/builtins/arm/addsf3.S +++ b/src/libcompiler_builtins/compiler-rt/lib/builtins/arm/addsf3.S @@ -178,7 +178,7 @@ LOCAL_LABEL(do_substraction): push {r0, r1, r2, r3} movs r0, r4 - bl __clzsi2 + bl SYMBOL_NAME(__clzsi2) movs r5, r0 pop {r0, r1, r2, r3} // shift = rep_clz(aSignificand) - rep_clz(implicitBit << 3); diff --git a/src/libcompiler_builtins/compiler-rt/lib/builtins/arm/aeabi_cdcmp.S b/src/libcompiler_builtins/compiler-rt/lib/builtins/arm/aeabi_cdcmp.S index 87dd03dce9..adc2d55d90 100644 --- a/src/libcompiler_builtins/compiler-rt/lib/builtins/arm/aeabi_cdcmp.S +++ b/src/libcompiler_builtins/compiler-rt/lib/builtins/arm/aeabi_cdcmp.S @@ -55,7 +55,7 @@ DEFINE_COMPILERRT_FUNCTION(__aeabi_cdcmpeq) mov ip, #APSR_C msr APSR_nzcvq, ip #else - msr CPSR_f, #APSR_C + msr APSR_nzcvq, #APSR_C #endif JMP(lr) #endif @@ -115,11 +115,7 @@ DEFINE_COMPILERRT_FUNCTION(__aeabi_cdcmple) movne ip, #(APSR_C) 1: -#if defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7EM__) msr APSR_nzcvq, ip -#else - msr CPSR_f, ip -#endif pop {r0-r3} POP_PC() #endif diff --git a/src/libcompiler_builtins/compiler-rt/lib/builtins/arm/aeabi_cfcmp.S b/src/libcompiler_builtins/compiler-rt/lib/builtins/arm/aeabi_cfcmp.S index c5fee6b6a0..4b1de99768 100644 --- a/src/libcompiler_builtins/compiler-rt/lib/builtins/arm/aeabi_cfcmp.S +++ b/src/libcompiler_builtins/compiler-rt/lib/builtins/arm/aeabi_cfcmp.S @@ -55,7 +55,7 @@ DEFINE_COMPILERRT_FUNCTION(__aeabi_cfcmpeq) mov ip, #APSR_C msr APSR_nzcvq, ip #else - msr CPSR_f, #APSR_C + msr APSR_nzcvq, #APSR_C #endif JMP(lr) #endif @@ -115,11 +115,7 @@ DEFINE_COMPILERRT_FUNCTION(__aeabi_cfcmple) movne ip, #(APSR_C) 1: -#if defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7EM__) msr APSR_nzcvq, ip -#else - msr CPSR_f, ip -#endif pop {r0-r3} POP_PC() #endif diff --git a/src/libcompiler_builtins/compiler-rt/lib/builtins/clzdi2.c b/src/libcompiler_builtins/compiler-rt/lib/builtins/clzdi2.c index b56d98f5c0..1819e6be43 100644 --- a/src/libcompiler_builtins/compiler-rt/lib/builtins/clzdi2.c +++ b/src/libcompiler_builtins/compiler-rt/lib/builtins/clzdi2.c @@ -16,8 +16,13 @@ /* Returns: the number of leading 0-bits */ -#if !defined(__clang__) && (defined(__sparc64__) || defined(__mips64) || defined(__riscv__)) -/* gcc resolves __builtin_clz -> __clzdi2 leading to infinite recursion */ +#if !defined(__clang__) && \ + ((defined(__sparc__) && defined(__arch64__)) || \ + defined(__mips64) || \ + (defined(__riscv) && __SIZEOF_POINTER__ >= 8)) +/* On 64-bit architectures with neither a native clz instruction nor a native + * ctz instruction, gcc resolves __builtin_clz to __clzdi2 rather than + * __clzsi2, leading to infinite recursion. 
*/ #define __builtin_clz(a) __clzsi2(a) extern si_int __clzsi2(si_int); #endif diff --git a/src/libcompiler_builtins/compiler-rt/lib/builtins/cpu_model.c b/src/libcompiler_builtins/compiler-rt/lib/builtins/cpu_model.c index 43b913390f..54f1add916 100644 --- a/src/libcompiler_builtins/compiler-rt/lib/builtins/cpu_model.c +++ b/src/libcompiler_builtins/compiler-rt/lib/builtins/cpu_model.c @@ -55,6 +55,9 @@ enum ProcessorTypes { AMD_BTVER2, AMDFAM17H, INTEL_KNM, + INTEL_GOLDMONT, + INTEL_GOLDMONT_PLUS, + INTEL_TREMONT, CPU_TYPE_MAX }; @@ -76,6 +79,8 @@ enum ProcessorSubtypes { INTEL_COREI7_SKYLAKE, INTEL_COREI7_SKYLAKE_AVX512, INTEL_COREI7_CANNONLAKE, + INTEL_COREI7_ICELAKE_CLIENT, + INTEL_COREI7_ICELAKE_SERVER, CPU_SUBTYPE_MAX }; @@ -110,7 +115,12 @@ enum ProcessorFeatures { FEATURE_AVX512IFMA, FEATURE_AVX5124VNNIW, FEATURE_AVX5124FMAPS, - FEATURE_AVX512VPOPCNTDQ + FEATURE_AVX512VPOPCNTDQ, + FEATURE_AVX512VBMI2, + FEATURE_GFNI, + FEATURE_VPCLMULQDQ, + FEATURE_AVX512VNNI, + FEATURE_AVX512BITALG }; // The check below for i386 was copied from clang's cpuid.h (__get_cpuid_max). @@ -364,6 +374,14 @@ getIntelProcessorTypeAndSubtype(unsigned Family, unsigned Model, case 0x4c: // really airmont *Type = INTEL_SILVERMONT; break; // "silvermont" + // Goldmont: + case 0x5c: // Apollo Lake + case 0x5f: // Denverton + *Type = INTEL_GOLDMONT; + break; // "goldmont" + case 0x7a: + *Type = INTEL_GOLDMONT_PLUS; + break; case 0x57: *Type = INTEL_KNL; // knl @@ -438,35 +456,45 @@ static void getAMDProcessorTypeAndSubtype(unsigned Family, unsigned Model, } static void getAvailableFeatures(unsigned ECX, unsigned EDX, unsigned MaxLeaf, - unsigned *FeaturesOut) { + unsigned *FeaturesOut, + unsigned *Features2Out) { unsigned Features = 0; + unsigned Features2 = 0; unsigned EAX, EBX; +#define setFeature(F) \ + do { \ + if (F < 32) \ + Features |= 1 << F; \ + else if (F < 64) \ + Features2 |= 1 << (F - 32); \ + } while (0) + if ((EDX >> 15) & 1) - Features |= 1 << FEATURE_CMOV; + setFeature(FEATURE_CMOV); if ((EDX >> 23) & 1) - Features |= 1 << FEATURE_MMX; + setFeature(FEATURE_MMX); if ((EDX >> 25) & 1) - Features |= 1 << FEATURE_SSE; + setFeature(FEATURE_SSE); if ((EDX >> 26) & 1) - Features |= 1 << FEATURE_SSE2; + setFeature(FEATURE_SSE2); if ((ECX >> 0) & 1) - Features |= 1 << FEATURE_SSE3; + setFeature(FEATURE_SSE3); if ((ECX >> 1) & 1) - Features |= 1 << FEATURE_PCLMUL; + setFeature(FEATURE_PCLMUL); if ((ECX >> 9) & 1) - Features |= 1 << FEATURE_SSSE3; + setFeature(FEATURE_SSSE3); if ((ECX >> 12) & 1) - Features |= 1 << FEATURE_FMA; + setFeature(FEATURE_FMA); if ((ECX >> 19) & 1) - Features |= 1 << FEATURE_SSE4_1; + setFeature(FEATURE_SSE4_1); if ((ECX >> 20) & 1) - Features |= 1 << FEATURE_SSE4_2; + setFeature(FEATURE_SSE4_2); if ((ECX >> 23) & 1) - Features |= 1 << FEATURE_POPCNT; + setFeature(FEATURE_POPCNT); if ((ECX >> 25) & 1) - Features |= 1 << FEATURE_AES; + setFeature(FEATURE_AES); // If CPUID indicates support for XSAVE, XRESTORE and AVX, and XGETBV // indicates that the AVX registers will be saved and restored on context @@ -477,43 +505,53 @@ static void getAvailableFeatures(unsigned ECX, unsigned EDX, unsigned MaxLeaf, bool HasAVX512Save = HasAVX && ((EAX & 0xe0) == 0xe0); if (HasAVX) - Features |= 1 << FEATURE_AVX; + setFeature(FEATURE_AVX); bool HasLeaf7 = MaxLeaf >= 0x7 && !getX86CpuIDAndInfoEx(0x7, 0x0, &EAX, &EBX, &ECX, &EDX); if (HasLeaf7 && ((EBX >> 3) & 1)) - Features |= 1 << FEATURE_BMI; + setFeature(FEATURE_BMI); if (HasLeaf7 && ((EBX >> 5) & 1) && HasAVX) - Features |= 1 << FEATURE_AVX2; + 
setFeature(FEATURE_AVX2); if (HasLeaf7 && ((EBX >> 9) & 1)) - Features |= 1 << FEATURE_BMI2; + setFeature(FEATURE_BMI2); if (HasLeaf7 && ((EBX >> 16) & 1) && HasAVX512Save) - Features |= 1 << FEATURE_AVX512F; + setFeature(FEATURE_AVX512F); if (HasLeaf7 && ((EBX >> 17) & 1) && HasAVX512Save) - Features |= 1 << FEATURE_AVX512DQ; + setFeature(FEATURE_AVX512DQ); if (HasLeaf7 && ((EBX >> 21) & 1) && HasAVX512Save) - Features |= 1 << FEATURE_AVX512IFMA; + setFeature(FEATURE_AVX512IFMA); if (HasLeaf7 && ((EBX >> 26) & 1) && HasAVX512Save) - Features |= 1 << FEATURE_AVX512PF; + setFeature(FEATURE_AVX512PF); if (HasLeaf7 && ((EBX >> 27) & 1) && HasAVX512Save) - Features |= 1 << FEATURE_AVX512ER; + setFeature(FEATURE_AVX512ER); if (HasLeaf7 && ((EBX >> 28) & 1) && HasAVX512Save) - Features |= 1 << FEATURE_AVX512CD; + setFeature(FEATURE_AVX512CD); if (HasLeaf7 && ((EBX >> 30) & 1) && HasAVX512Save) - Features |= 1 << FEATURE_AVX512BW; + setFeature(FEATURE_AVX512BW); if (HasLeaf7 && ((EBX >> 31) & 1) && HasAVX512Save) - Features |= 1 << FEATURE_AVX512VL; + setFeature(FEATURE_AVX512VL); if (HasLeaf7 && ((ECX >> 1) & 1) && HasAVX512Save) - Features |= 1 << FEATURE_AVX512VBMI; + setFeature(FEATURE_AVX512VBMI); + if (HasLeaf7 && ((ECX >> 6) & 1) && HasAVX512Save) + setFeature(FEATURE_AVX512VBMI2); + if (HasLeaf7 && ((ECX >> 8) & 1)) + setFeature(FEATURE_GFNI); + if (HasLeaf7 && ((ECX >> 10) & 1) && HasAVX) + setFeature(FEATURE_VPCLMULQDQ); + if (HasLeaf7 && ((ECX >> 11) & 1) && HasAVX512Save) + setFeature(FEATURE_AVX512VNNI); + if (HasLeaf7 && ((ECX >> 12) & 1) && HasAVX512Save) + setFeature(FEATURE_AVX512BITALG); if (HasLeaf7 && ((ECX >> 14) & 1) && HasAVX512Save) - Features |= 1 << FEATURE_AVX512VPOPCNTDQ; + setFeature(FEATURE_AVX512VPOPCNTDQ); if (HasLeaf7 && ((EDX >> 2) & 1) && HasAVX512Save) - Features |= 1 << FEATURE_AVX5124VNNIW; + setFeature(FEATURE_AVX5124VNNIW); if (HasLeaf7 && ((EDX >> 3) & 1) && HasAVX512Save) - Features |= 1 << FEATURE_AVX5124FMAPS; + setFeature(FEATURE_AVX5124FMAPS); unsigned MaxExtLevel; getX86CpuIDAndInfo(0x80000000, &MaxExtLevel, &EBX, &ECX, &EDX); @@ -521,13 +559,15 @@ static void getAvailableFeatures(unsigned ECX, unsigned EDX, unsigned MaxLeaf, bool HasExtLeaf1 = MaxExtLevel >= 0x80000001 && !getX86CpuIDAndInfo(0x80000001, &EAX, &EBX, &ECX, &EDX); if (HasExtLeaf1 && ((ECX >> 6) & 1)) - Features |= 1 << FEATURE_SSE4_A; + setFeature(FEATURE_SSE4_A); if (HasExtLeaf1 && ((ECX >> 11) & 1)) - Features |= 1 << FEATURE_XOP; + setFeature(FEATURE_XOP); if (HasExtLeaf1 && ((ECX >> 16) & 1)) - Features |= 1 << FEATURE_FMA4; + setFeature(FEATURE_FMA4); *FeaturesOut = Features; + *Features2Out = Features2; +#undef setFeature } #if defined(HAVE_INIT_PRIORITY) @@ -548,8 +588,9 @@ struct __processor_model { unsigned int __cpu_subtype; unsigned int __cpu_features[1]; } __cpu_model = {0, 0, 0, {0}}; +unsigned int __cpu_features2; -/* A constructor function that is sets __cpu_model and __cpu_features with +/* A constructor function that is sets __cpu_model and __cpu_features2 with the right values. This needs to run only once. This constructor is given the highest priority and it should run before constructors without the priority set. However, it still runs after ifunc initializers and @@ -562,6 +603,7 @@ __cpu_indicator_init(void) { unsigned Vendor; unsigned Model, Family, Brand_id; unsigned Features = 0; + unsigned Features2 = 0; /* This function needs to run just once. 
*/ if (__cpu_model.__cpu_vendor) @@ -580,8 +622,9 @@ __cpu_indicator_init(void) { Brand_id = EBX & 0xff; /* Find available features. */ - getAvailableFeatures(ECX, EDX, MaxLeaf, &Features); + getAvailableFeatures(ECX, EDX, MaxLeaf, &Features, &Features2); __cpu_model.__cpu_features[0] = Features; + __cpu_features2 = Features2; if (Vendor == SIG_INTEL) { /* Get CPU type. */ diff --git a/src/libcompiler_builtins/compiler-rt/lib/builtins/ctzdi2.c b/src/libcompiler_builtins/compiler-rt/lib/builtins/ctzdi2.c index eecde29718..ef6d7fea13 100644 --- a/src/libcompiler_builtins/compiler-rt/lib/builtins/ctzdi2.c +++ b/src/libcompiler_builtins/compiler-rt/lib/builtins/ctzdi2.c @@ -16,8 +16,13 @@ /* Returns: the number of trailing 0-bits */ -#if !defined(__clang__) && (defined(__sparc64__) || defined(__mips64) || defined(__riscv__)) -/* gcc resolves __builtin_ctz -> __ctzdi2 leading to infinite recursion */ +#if !defined(__clang__) && \ + ((defined(__sparc__) && defined(__arch64__)) || \ + defined(__mips64) || \ + (defined(__riscv) && __SIZEOF_POINTER__ >= 8)) +/* On 64-bit architectures with neither a native clz instruction nor a native + * ctz instruction, gcc resolves __builtin_ctz to __ctzdi2 rather than + * __ctzsi2, leading to infinite recursion. */ #define __builtin_ctz(a) __ctzsi2(a) extern si_int __ctzsi2(si_int); #endif diff --git a/src/libcompiler_builtins/compiler-rt/lib/builtins/divdc3.c b/src/libcompiler_builtins/compiler-rt/lib/builtins/divdc3.c index 3c88390b5e..392d6ecacd 100644 --- a/src/libcompiler_builtins/compiler-rt/lib/builtins/divdc3.c +++ b/src/libcompiler_builtins/compiler-rt/lib/builtins/divdc3.c @@ -12,6 +12,8 @@ * ===----------------------------------------------------------------------=== */ +#define DOUBLE_PRECISION +#include "fp_lib.h" #include "int_lib.h" #include "int_math.h" @@ -21,7 +23,7 @@ COMPILER_RT_ABI Dcomplex __divdc3(double __a, double __b, double __c, double __d) { int __ilogbw = 0; - double __logbw = crt_logb(crt_fmax(crt_fabs(__c), crt_fabs(__d))); + double __logbw = __compiler_rt_logb(crt_fmax(crt_fabs(__c), crt_fabs(__d))); if (crt_isfinite(__logbw)) { __ilogbw = (int)__logbw; diff --git a/src/libcompiler_builtins/compiler-rt/lib/builtins/divsc3.c b/src/libcompiler_builtins/compiler-rt/lib/builtins/divsc3.c index 42a48315e6..0d18a256c3 100644 --- a/src/libcompiler_builtins/compiler-rt/lib/builtins/divsc3.c +++ b/src/libcompiler_builtins/compiler-rt/lib/builtins/divsc3.c @@ -12,6 +12,8 @@ *===----------------------------------------------------------------------=== */ +#define SINGLE_PRECISION +#include "fp_lib.h" #include "int_lib.h" #include "int_math.h" @@ -21,7 +23,8 @@ COMPILER_RT_ABI Fcomplex __divsc3(float __a, float __b, float __c, float __d) { int __ilogbw = 0; - float __logbw = crt_logbf(crt_fmaxf(crt_fabsf(__c), crt_fabsf(__d))); + float __logbw = + __compiler_rt_logbf(crt_fmaxf(crt_fabsf(__c), crt_fabsf(__d))); if (crt_isfinite(__logbw)) { __ilogbw = (int)__logbw; diff --git a/src/libcompiler_builtins/compiler-rt/lib/builtins/divtc3.c b/src/libcompiler_builtins/compiler-rt/lib/builtins/divtc3.c index 16e538ba4a..e5ea00d841 100644 --- a/src/libcompiler_builtins/compiler-rt/lib/builtins/divtc3.c +++ b/src/libcompiler_builtins/compiler-rt/lib/builtins/divtc3.c @@ -12,6 +12,8 @@ *===----------------------------------------------------------------------=== */ +#define QUAD_PRECISION +#include "fp_lib.h" #include "int_lib.h" #include "int_math.h" @@ -21,7 +23,8 @@ COMPILER_RT_ABI Lcomplex __divtc3(long double __a, long double __b, long double __c, 
long double __d) { int __ilogbw = 0; - long double __logbw = crt_logbl(crt_fmaxl(crt_fabsl(__c), crt_fabsl(__d))); + long double __logbw = + __compiler_rt_logbl(crt_fmaxl(crt_fabsl(__c), crt_fabsl(__d))); if (crt_isfinite(__logbw)) { __ilogbw = (int)__logbw; diff --git a/src/libcompiler_builtins/compiler-rt/lib/builtins/emutls.c b/src/libcompiler_builtins/compiler-rt/lib/builtins/emutls.c index 07d436e267..ef95a1c260 100644 --- a/src/libcompiler_builtins/compiler-rt/lib/builtins/emutls.c +++ b/src/libcompiler_builtins/compiler-rt/lib/builtins/emutls.c @@ -42,6 +42,7 @@ static void emutls_shutdown(emutls_address_array *array); static pthread_mutex_t emutls_mutex = PTHREAD_MUTEX_INITIALIZER; static pthread_key_t emutls_pthread_key; +static bool emutls_key_created = false; typedef unsigned int gcc_word __attribute__((mode(word))); typedef unsigned int gcc_pointer __attribute__((mode(pointer))); @@ -109,6 +110,7 @@ static void emutls_key_destructor(void* ptr) { static __inline void emutls_init(void) { if (pthread_key_create(&emutls_pthread_key, emutls_key_destructor) != 0) abort(); + emutls_key_created = true; } static __inline void emutls_init_once(void) { @@ -390,3 +392,14 @@ void* __emutls_get_address(__emutls_control* control) { array->data[index] = emutls_allocate_object(control); return array->data[index]; } + +#ifdef __BIONIC__ +/* Called by Bionic on dlclose to delete the emutls pthread key. */ +__attribute__((visibility("hidden"))) +void __emutls_unregister_key(void) { + if (emutls_key_created) { + pthread_key_delete(emutls_pthread_key); + emutls_key_created = false; + } +} +#endif diff --git a/src/libcompiler_builtins/compiler-rt/lib/builtins/fp_lib.h b/src/libcompiler_builtins/compiler-rt/lib/builtins/fp_lib.h index 223fb980aa..a0e19ab6a8 100644 --- a/src/libcompiler_builtins/compiler-rt/lib/builtins/fp_lib.h +++ b/src/libcompiler_builtins/compiler-rt/lib/builtins/fp_lib.h @@ -25,6 +25,7 @@ #include #include #include "int_lib.h" +#include "int_math.h" // x86_64 FreeBSD prior v9.3 define fixed-width types incorrectly in // 32-bit mode. @@ -265,6 +266,62 @@ static __inline void wideRightShiftWithSticky(rep_t *hi, rep_t *lo, unsigned int *hi = 0; } } + +// Implements logb methods (logb, logbf, logbl) for IEEE-754. This avoids +// pulling in a libm dependency from compiler-rt, but is not meant to replace +// it (i.e. code calling logb() should get the one from libm, not this), hence +// the __compiler_rt prefix. 
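`__compiler_rt_logbX` below reads the exponent field straight out of the IEEE-754 representation instead of calling into libm. The same idea for `f64` (binary64: 11 exponent bits, bias 1023), ignoring subnormals, which the real implementation handles by normalising first:

```
// Sketch only: assumes IEEE-754 binary64 and ignores subnormals, which the
// compiler-rt version handles by normalizing before reading the exponent.
fn logb_bits(x: f64) -> f64 {
    if x == 0.0 {
        return f64::NEG_INFINITY;      // logb(±0) is -inf
    }
    if x.is_nan() || x.is_infinite() {
        return x.abs();                // NaN stays NaN, ±inf becomes +inf
    }
    let biased = ((x.to_bits() >> 52) & 0x7ff) as i64;
    (biased - 1023) as f64             // unbias the exponent
}

fn main() {
    assert_eq!(logb_bits(8.0), 3.0);
    assert_eq!(logb_bits(0.75), -1.0);
    assert_eq!(logb_bits(f64::NEG_INFINITY), f64::INFINITY);
}
```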
+static __inline fp_t __compiler_rt_logbX(fp_t x) { + rep_t rep = toRep(x); + int exp = (rep & exponentMask) >> significandBits; + + // Abnormal cases: + // 1) +/- inf returns +inf; NaN returns NaN + // 2) 0.0 returns -inf + if (exp == maxExponent) { + if (((rep & signBit) == 0) || (x != x)) { + return x; // NaN or +inf: return x + } else { + return -x; // -inf: return -x + } + } else if (x == 0.0) { + // 0.0: return -inf + return fromRep(infRep | signBit); + } + + if (exp != 0) { + // Normal number + return exp - exponentBias; // Unbias exponent + } else { + // Subnormal number; normalize and repeat + rep &= absMask; + const int shift = 1 - normalize(&rep); + exp = (rep & exponentMask) >> significandBits; + return exp - exponentBias - shift; // Unbias exponent + } +} +#endif + +#if defined(SINGLE_PRECISION) +static __inline fp_t __compiler_rt_logbf(fp_t x) { + return __compiler_rt_logbX(x); +} +#elif defined(DOUBLE_PRECISION) +static __inline fp_t __compiler_rt_logb(fp_t x) { + return __compiler_rt_logbX(x); +} +#elif defined(QUAD_PRECISION) + #if defined(CRT_LDBL_128BIT) +static __inline fp_t __compiler_rt_logbl(fp_t x) { + return __compiler_rt_logbX(x); +} + #else +// The generic implementation only works for ieee754 floating point. For other +// floating point types, continue to rely on the libm implementation for now. +static __inline long double __compiler_rt_logbl(long double x) { + return crt_logbl(x); +} + #endif #endif #endif // FP_LIB_HEADER diff --git a/src/libcompiler_builtins/compiler-rt/lib/builtins/int_lib.h b/src/libcompiler_builtins/compiler-rt/lib/builtins/int_lib.h index 0b62666303..69523cb962 100644 --- a/src/libcompiler_builtins/compiler-rt/lib/builtins/int_lib.h +++ b/src/libcompiler_builtins/compiler-rt/lib/builtins/int_lib.h @@ -43,7 +43,7 @@ #define AEABI_RTABI __attribute__((__pcs__("aapcs"))) -#ifdef _MSC_VER +#if defined(_MSC_VER) && !defined(__clang__) #define ALWAYS_INLINE __forceinline #define NOINLINE __declspec(noinline) #define NORETURN __declspec(noreturn) diff --git a/src/libcompiler_builtins/compiler-rt/lib/builtins/int_math.h b/src/libcompiler_builtins/compiler-rt/lib/builtins/int_math.h index fc81fb7f02..aa3d0721a8 100644 --- a/src/libcompiler_builtins/compiler-rt/lib/builtins/int_math.h +++ b/src/libcompiler_builtins/compiler-rt/lib/builtins/int_math.h @@ -92,12 +92,8 @@ #endif #if defined(_MSC_VER) && !defined(__clang__) -#define crt_logb(x) logb((x)) -#define crt_logbf(x) logbf((x)) #define crt_logbl(x) logbl((x)) #else -#define crt_logb(x) __builtin_logb((x)) -#define crt_logbf(x) __builtin_logbf((x)) #define crt_logbl(x) __builtin_logbl((x)) #endif diff --git a/src/libcompiler_builtins/compiler-rt/lib/builtins/int_types.h b/src/libcompiler_builtins/compiler-rt/lib/builtins/int_types.h index f53f343d35..9f8da56cb7 100644 --- a/src/libcompiler_builtins/compiler-rt/lib/builtins/int_types.h +++ b/src/libcompiler_builtins/compiler-rt/lib/builtins/int_types.h @@ -60,10 +60,19 @@ typedef union }s; } udwords; -#if (defined(__LP64__) || defined(__wasm__) || defined(__mips64)) || defined(__riscv) +#if defined(__LP64__) || defined(__wasm__) || defined(__mips64) || \ + defined(__riscv) || defined(_WIN64) #define CRT_HAS_128BIT #endif +/* MSVC doesn't have a working 128bit integer type. Users should really compile + * compiler-rt with clang, but if they happen to be doing a standalone build for + * asan or something else, disable the 128 bit parts so things sort of work. 
+ */ +#if defined(_MSC_VER) && !defined(__clang__) +#undef CRT_HAS_128BIT +#endif + #ifdef CRT_HAS_128BIT typedef int ti_int __attribute__ ((mode (TI))); typedef unsigned tu_int __attribute__ ((mode (TI))); @@ -137,6 +146,18 @@ typedef struct #endif /* _YUGA_LITTLE_ENDIAN */ } uqwords; +/* Check if the target supports 80 bit extended precision long doubles. + * Notably, on x86 Windows, MSVC only provides a 64-bit long double, but GCC + * still makes it 80 bits. Clang will match whatever compiler it is trying to + * be compatible with. + */ +#if ((defined(__i386__) || defined(__x86_64__)) && !defined(_MSC_VER)) || \ + defined(__m68k__) || defined(__ia64__) +#define HAS_80_BIT_LONG_DOUBLE 1 +#else +#define HAS_80_BIT_LONG_DOUBLE 0 +#endif + typedef union { uqwords u; diff --git a/src/libcompiler_builtins/compiler-rt/lib/builtins/int_util.c b/src/libcompiler_builtins/compiler-rt/lib/builtins/int_util.c index de87410dbc..752f201558 100644 --- a/src/libcompiler_builtins/compiler-rt/lib/builtins/int_util.c +++ b/src/libcompiler_builtins/compiler-rt/lib/builtins/int_util.c @@ -27,7 +27,7 @@ NORETURN extern void panic(const char *, ...); #ifndef _WIN32 __attribute__((visibility("hidden"))) #endif -void compilerrt_abort_impl(const char *file, int line, const char *function) { +void __compilerrt_abort_impl(const char *file, int line, const char *function) { panic("%s:%d: abort in %s", file, line, function); } @@ -41,7 +41,7 @@ NORETURN extern void __assert_rtn(const char *func, const char *file, int line, __attribute__((weak)) __attribute__((visibility("hidden"))) #endif -void compilerrt_abort_impl(const char *file, int line, const char *function) { +void __compilerrt_abort_impl(const char *file, int line, const char *function) { __assert_rtn(function, file, line, "libcompiler_rt abort"); } @@ -51,7 +51,7 @@ void compilerrt_abort_impl(const char *file, int line, const char *function) { __attribute__((weak)) __attribute__((visibility("hidden"))) #endif -void compilerrt_abort_impl(const char *file, int line, const char *function) { +void __compilerrt_abort_impl(const char *file, int line, const char *function) { __builtin_trap(); } @@ -64,7 +64,7 @@ void compilerrt_abort_impl(const char *file, int line, const char *function) { __attribute__((weak)) __attribute__((visibility("hidden"))) #endif -void compilerrt_abort_impl(const char *file, int line, const char *function) { +void __compilerrt_abort_impl(const char *file, int line, const char *function) { abort(); } diff --git a/src/libcompiler_builtins/compiler-rt/lib/builtins/int_util.h b/src/libcompiler_builtins/compiler-rt/lib/builtins/int_util.h index a7b20ed662..c3c87381ad 100644 --- a/src/libcompiler_builtins/compiler-rt/lib/builtins/int_util.h +++ b/src/libcompiler_builtins/compiler-rt/lib/builtins/int_util.h @@ -20,10 +20,10 @@ #define INT_UTIL_H /** \brief Trigger a program abort (or panic for kernel code). 
*/ -#define compilerrt_abort() compilerrt_abort_impl(__FILE__, __LINE__, __func__) +#define compilerrt_abort() __compilerrt_abort_impl(__FILE__, __LINE__, __func__) -NORETURN void compilerrt_abort_impl(const char *file, int line, - const char *function); +NORETURN void __compilerrt_abort_impl(const char *file, int line, + const char *function); #define COMPILE_TIME_ASSERT(expr) COMPILE_TIME_ASSERT1(expr, __COUNTER__) #define COMPILE_TIME_ASSERT1(expr, cnt) COMPILE_TIME_ASSERT2(expr, cnt) diff --git a/src/libcompiler_builtins/compiler-rt/lib/builtins/os_version_check.c b/src/libcompiler_builtins/compiler-rt/lib/builtins/os_version_check.c index 772e33333c..e0d40edc7e 100644 --- a/src/libcompiler_builtins/compiler-rt/lib/builtins/os_version_check.c +++ b/src/libcompiler_builtins/compiler-rt/lib/builtins/os_version_check.c @@ -15,7 +15,6 @@ #ifdef __APPLE__ -#include #include #include #include @@ -28,6 +27,33 @@ static int32_t GlobalMajor, GlobalMinor, GlobalSubminor; static dispatch_once_t DispatchOnceCounter; +/* We can't include directly from here, so + * just forward declare everything that we need from it. */ + +typedef const void *CFDataRef, *CFAllocatorRef, *CFPropertyListRef, + *CFStringRef, *CFDictionaryRef, *CFTypeRef, *CFErrorRef; + +#if __LLP64__ +typedef unsigned long long CFTypeID; +typedef unsigned long long CFOptionFlags; +typedef signed long long CFIndex; +#else +typedef unsigned long CFTypeID; +typedef unsigned long CFOptionFlags; +typedef signed long CFIndex; +#endif + +typedef unsigned char UInt8; +typedef _Bool Boolean; +typedef CFIndex CFPropertyListFormat; +typedef uint32_t CFStringEncoding; + +/* kCFStringEncodingASCII analog. */ +#define CF_STRING_ENCODING_ASCII 0x0600 +/* kCFStringEncodingUTF8 analog. */ +#define CF_STRING_ENCODING_UTF8 0x08000100 +#define CF_PROPERTY_LIST_IMMUTABLE 0 + typedef CFDataRef (*CFDataCreateWithBytesNoCopyFuncTy)(CFAllocatorRef, const UInt8 *, CFIndex, CFAllocatorRef); @@ -55,8 +81,7 @@ static void parseSystemVersionPList(void *Unused) { const void *NullAllocator = dlsym(RTLD_DEFAULT, "kCFAllocatorNull"); if (!NullAllocator) return; - const CFAllocatorRef kCFAllocatorNull = - *(const CFAllocatorRef *)NullAllocator; + const CFAllocatorRef AllocatorNull = *(const CFAllocatorRef *)NullAllocator; CFDataCreateWithBytesNoCopyFuncTy CFDataCreateWithBytesNoCopyFunc = (CFDataCreateWithBytesNoCopyFuncTy)dlsym(RTLD_DEFAULT, "CFDataCreateWithBytesNoCopy"); @@ -140,21 +165,21 @@ static void parseSystemVersionPList(void *Unused) { /* Get the file buffer into CF's format. 
We pass in a null allocator here * * because we free PListBuf ourselves */ FileContentsRef = (*CFDataCreateWithBytesNoCopyFunc)( - NULL, PListBuf, (CFIndex)NumRead, kCFAllocatorNull); + NULL, PListBuf, (CFIndex)NumRead, AllocatorNull); if (!FileContentsRef) goto Fail; if (CFPropertyListCreateWithDataFunc) PListRef = (*CFPropertyListCreateWithDataFunc)( - NULL, FileContentsRef, kCFPropertyListImmutable, NULL, NULL); + NULL, FileContentsRef, CF_PROPERTY_LIST_IMMUTABLE, NULL, NULL); else PListRef = (*CFPropertyListCreateFromXMLDataFunc)( - NULL, FileContentsRef, kCFPropertyListImmutable, NULL); + NULL, FileContentsRef, CF_PROPERTY_LIST_IMMUTABLE, NULL); if (!PListRef) goto Fail; CFStringRef ProductVersion = (*CFStringCreateWithCStringNoCopyFunc)( - NULL, "ProductVersion", kCFStringEncodingASCII, kCFAllocatorNull); + NULL, "ProductVersion", CF_STRING_ENCODING_ASCII, AllocatorNull); if (!ProductVersion) goto Fail; CFTypeRef OpaqueValue = (*CFDictionaryGetValueFunc)(PListRef, ProductVersion); @@ -165,7 +190,7 @@ static void parseSystemVersionPList(void *Unused) { char VersionStr[32]; if (!(*CFStringGetCStringFunc)((CFStringRef)OpaqueValue, VersionStr, - sizeof(VersionStr), kCFStringEncodingUTF8)) + sizeof(VersionStr), CF_STRING_ENCODING_UTF8)) goto Fail; sscanf(VersionStr, "%d.%d.%d", &GlobalMajor, &GlobalMinor, &GlobalSubminor); diff --git a/src/libcompiler_builtins/compiler-rt/lib/builtins/ppc/divtc3.c b/src/libcompiler_builtins/compiler-rt/lib/builtins/ppc/divtc3.c index 8ec41c528a..ef532b8411 100644 --- a/src/libcompiler_builtins/compiler-rt/lib/builtins/ppc/divtc3.c +++ b/src/libcompiler_builtins/compiler-rt/lib/builtins/ppc/divtc3.c @@ -4,6 +4,11 @@ #include "DD.h" #include "../int_math.h" +// Use DOUBLE_PRECISION because the soft-fp method we use is logb (on the upper +// half of the long doubles), even though this file defines complex division for +// 128-bit floats. +#define DOUBLE_PRECISION +#include "../fp_lib.h" #if !defined(CRT_INFINITY) && defined(HUGE_VAL) #define CRT_INFINITY HUGE_VAL @@ -21,9 +26,10 @@ __divtc3(long double a, long double b, long double c, long double d) DD dDD = { .ld = d }; int ilogbw = 0; - const double logbw = crt_logb(crt_fmax(crt_fabs(cDD.s.hi), crt_fabs(dDD.s.hi) )); - - if (crt_isfinite(logbw)) + const double logbw = __compiler_rt_logb( + crt_fmax(crt_fabs(cDD.s.hi), crt_fabs(dDD.s.hi))); + + if (crt_isfinite(logbw)) { ilogbw = (int)logbw; diff --git a/src/libcompiler_builtins/compiler-rt/lib/cfi/CMakeLists.txt b/src/libcompiler_builtins/compiler-rt/lib/cfi/CMakeLists.txt index 7ed72bca5d..463a1fd599 100644 --- a/src/libcompiler_builtins/compiler-rt/lib/cfi/CMakeLists.txt +++ b/src/libcompiler_builtins/compiler-rt/lib/cfi/CMakeLists.txt @@ -1,6 +1,6 @@ add_compiler_rt_component(cfi) -if(OS_NAME MATCHES "Linux") +if(OS_NAME MATCHES "Linux" OR OS_NAME MATCHES "FreeBSD" OR OS_NAME MATCHES "NetBSD") set(CFI_SOURCES cfi.cc) include_directories(..) 
diff --git a/src/libcompiler_builtins/compiler-rt/lib/cfi/cfi.cc b/src/libcompiler_builtins/compiler-rt/lib/cfi/cfi.cc index a2f127f93c..b0a9437596 100644 --- a/src/libcompiler_builtins/compiler-rt/lib/cfi/cfi.cc +++ b/src/libcompiler_builtins/compiler-rt/lib/cfi/cfi.cc @@ -13,15 +13,33 @@ #include #include + +#include "sanitizer_common/sanitizer_common.h" +#if SANITIZER_FREEBSD +#include +#endif #include #include +#include #include +#if SANITIZER_LINUX typedef ElfW(Phdr) Elf_Phdr; typedef ElfW(Ehdr) Elf_Ehdr; +typedef ElfW(Addr) Elf_Addr; +typedef ElfW(Sym) Elf_Sym; +typedef ElfW(Dyn) Elf_Dyn; +#elif SANITIZER_FREEBSD +#if SANITIZER_WORDSIZE == 64 +#define ElfW64_Dyn Elf_Dyn +#define ElfW64_Sym Elf_Sym +#else +#define ElfW32_Dyn Elf_Dyn +#define ElfW32_Sym Elf_Sym +#endif +#endif #include "interception/interception.h" -#include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_flag_parser.h" #include "ubsan/ubsan_init.h" #include "ubsan/ubsan_flags.h" @@ -154,15 +172,25 @@ void ShadowBuilder::Add(uptr begin, uptr end, uptr cfi_check) { *s = sv; } -#if SANITIZER_LINUX +#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD void ShadowBuilder::Install() { MprotectReadOnly(shadow_, GetShadowSize()); uptr main_shadow = GetShadow(); if (main_shadow) { // Update. +#if SANITIZER_LINUX void *res = mremap((void *)shadow_, GetShadowSize(), GetShadowSize(), MREMAP_MAYMOVE | MREMAP_FIXED, (void *)main_shadow); CHECK(res != MAP_FAILED); +#elif SANITIZER_NETBSD + void *res = mremap((void *)shadow_, GetShadowSize(), (void *)main_shadow, + GetShadowSize(), MAP_FIXED); + CHECK(res != MAP_FAILED); +#else + void *res = MmapFixedOrDie(shadow_, GetShadowSize()); + CHECK(res != MAP_FAILED); + ::memcpy(&shadow_, &main_shadow, GetShadowSize()); +#endif } else { // Initial setup. CHECK_EQ(kCfiShadowLimitsStorageSize, GetPageSizeCached()); @@ -183,17 +211,17 @@ void ShadowBuilder::Install() { // dlopen(RTLD_NOLOAD | RTLD_LAZY) // dlsym("__cfi_check"). uptr find_cfi_check_in_dso(dl_phdr_info *info) { - const ElfW(Dyn) *dynamic = nullptr; + const Elf_Dyn *dynamic = nullptr; for (int i = 0; i < info->dlpi_phnum; ++i) { if (info->dlpi_phdr[i].p_type == PT_DYNAMIC) { dynamic = - (const ElfW(Dyn) *)(info->dlpi_addr + info->dlpi_phdr[i].p_vaddr); + (const Elf_Dyn *)(info->dlpi_addr + info->dlpi_phdr[i].p_vaddr); break; } } if (!dynamic) return 0; uptr strtab = 0, symtab = 0, strsz = 0; - for (const ElfW(Dyn) *p = dynamic; p->d_tag != PT_NULL; ++p) { + for (const Elf_Dyn *p = dynamic; p->d_tag != PT_NULL; ++p) { if (p->d_tag == DT_SYMTAB) symtab = p->d_un.d_ptr; else if (p->d_tag == DT_STRTAB) @@ -227,7 +255,7 @@ uptr find_cfi_check_in_dso(dl_phdr_info *info) { return 0; } - for (const ElfW(Sym) *p = (const ElfW(Sym) *)symtab; (ElfW(Addr))p < strtab; + for (const Elf_Sym *p = (const Elf_Sym *)symtab; (Elf_Addr)p < strtab; ++p) { // There is no reliable way to find the end of the symbol table. In // lld-produces files, there are other sections between symtab and strtab. 
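The loop this hunk ends on is the core of `find_cfi_check_in_dso`: locate `PT_DYNAMIC`, pull `DT_SYMTAB` and `DT_STRTAB` out of it, then walk symbols until the cursor runs into the string table, because ELF provides no reliable symbol-table length. A standalone sketch of that lookup pattern using `dl_iterate_phdr` (illustration only; the callback name `find_symbol` is invented, and only a minimal subset of the sanity checks the real code performs on `symtab`/`strtab` is kept):

```
#define _GNU_SOURCE
#include <elf.h>
#include <link.h>
#include <stdio.h>
#include <string.h>

/* Called once per loaded object; looks for `data` (a symbol name) by reading
 * the object's dynamic section directly, as cfi.cc does for __cfi_check. */
static int find_symbol(struct dl_phdr_info *info, size_t size, void *data) {
    (void)size;
    const char *wanted = (const char *)data;
    const ElfW(Dyn) *dynamic = NULL;

    for (int i = 0; i < info->dlpi_phnum; ++i)
        if (info->dlpi_phdr[i].p_type == PT_DYNAMIC)
            dynamic = (const ElfW(Dyn) *)(info->dlpi_addr +
                                          info->dlpi_phdr[i].p_vaddr);
    if (!dynamic)
        return 0;

    ElfW(Addr) symtab = 0, strtab = 0;
    for (const ElfW(Dyn) *d = dynamic; d->d_tag != DT_NULL; ++d) {
        if (d->d_tag == DT_SYMTAB)
            symtab = d->d_un.d_ptr;
        else if (d->d_tag == DT_STRTAB)
            strtab = d->d_un.d_ptr;
    }
    if (!symtab || !strtab || symtab >= strtab)
        return 0;

    /* Same caveat as the patch: there is no end-of-symtab marker, so stop
     * when the cursor reaches the string table. */
    for (const ElfW(Sym) *s = (const ElfW(Sym) *)symtab;
         (ElfW(Addr))s < strtab; ++s)
        if (s->st_name && strcmp((const char *)strtab + s->st_name, wanted) == 0)
            printf("%s: %s at %#lx\n", info->dlpi_name, wanted,
                   (unsigned long)(info->dlpi_addr + s->st_value));

    return 0; /* 0 = keep iterating over the remaining loaded objects */
}

int main(void) {
    dl_iterate_phdr(find_symbol, (void *)"__cfi_check");
    return 0;
}
```

The rest of the hunk is what lets this build beyond Linux: `ElfW(...)` is a glibc convention, so the patch pins `Elf_Dyn`/`Elf_Sym`/`Elf_Addr` to concrete per-platform typedefs for FreeBSD and NetBSD instead.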
diff --git a/src/libcompiler_builtins/compiler-rt/lib/safestack/safestack.cc b/src/libcompiler_builtins/compiler-rt/lib/safestack/safestack.cc index 673f5fd30c..e68208015f 100644 --- a/src/libcompiler_builtins/compiler-rt/lib/safestack/safestack.cc +++ b/src/libcompiler_builtins/compiler-rt/lib/safestack/safestack.cc @@ -183,7 +183,10 @@ static void thread_cleanup_handler(void *_iter) { thread_stack_ll **stackp = &temp_stacks; while (*stackp) { thread_stack_ll *stack = *stackp; - if (stack->pid != pid || TgKill(stack->pid, stack->tid, 0) == -ESRCH) { + int error; + if (stack->pid != pid || + (internal_iserror(TgKill(stack->pid, stack->tid, 0), &error) && + error == ESRCH)) { UnmapOrDie(stack->stack_base, stack->size); *stackp = stack->next; free(stack); diff --git a/src/libcompiler_builtins/compiler-rt/lib/ubsan_minimal/CMakeLists.txt b/src/libcompiler_builtins/compiler-rt/lib/ubsan_minimal/CMakeLists.txt index b70246845f..e0910e80f9 100644 --- a/src/libcompiler_builtins/compiler-rt/lib/ubsan_minimal/CMakeLists.txt +++ b/src/libcompiler_builtins/compiler-rt/lib/ubsan_minimal/CMakeLists.txt @@ -1,4 +1,4 @@ -# Build for the undefined behavior sanitizer runtime support library. +# Build for the minimal undefined behavior sanitizer runtime support library. set(UBSAN_MINIMAL_SOURCES ubsan_minimal_handlers.cc @@ -15,7 +15,7 @@ set(UBSAN_DYNAMIC_LIBS ${SANITIZER_COMMON_LINK_LIBS}) add_compiler_rt_component(ubsan-minimal) -# Common parts of UBSan runtime. +# Common parts of minimal UBSan runtime. add_compiler_rt_object_libraries(RTUbsan_minimal OS ${SANITIZER_COMMON_SUPPORTED_OS} ARCHS ${UBSAN_COMMON_SUPPORTED_ARCH} @@ -23,9 +23,7 @@ add_compiler_rt_object_libraries(RTUbsan_minimal if(COMPILER_RT_HAS_UBSAN_MINIMAL) - # Initializer of standalone UBSan runtime. - - # Standalone UBSan runtimes. + # Standalone minimal UBSan runtimes. 
add_compiler_rt_runtime(clang_rt.ubsan_minimal STATIC OS ${SANITIZER_COMMON_SUPPORTED_OS} diff --git a/src/libcompiler_builtins/compiler-rt/utils/generate_netbsd_ioctls.awk b/src/libcompiler_builtins/compiler-rt/utils/generate_netbsd_ioctls.awk index 9a92ff82a6..82b1992143 100755 --- a/src/libcompiler_builtins/compiler-rt/utils/generate_netbsd_ioctls.awk +++ b/src/libcompiler_builtins/compiler-rt/utils/generate_netbsd_ioctls.awk @@ -44,133 +44,13 @@ BEGIN { rootdir = ENVIRON["ROOTDIR"] } - # hardcode list of headers with ioctl(2) entries - # List generated manually with the following script: - # for w in `find /usr/include/ -type f -name '*.h' -exec echo {} \;`; \ - # do awk '/[^a-zA-Z0-9_]_IO[W]*[R]*[ ]*\(/ && $2 ~ /^[A-Z_]+$/ {got=1} END{if(got) {print ARGV[1]}}' $w; \ - # done|awk '{print " ARGV[ARGC++] = rootdir \"" substr($0, 14) "\""}' - - ARGV[ARGC++] = rootdir "altq/altq_afmap.h" - ARGV[ARGC++] = rootdir "altq/altq.h" - ARGV[ARGC++] = rootdir "altq/altq_blue.h" - ARGV[ARGC++] = rootdir "altq/altq_cbq.h" - ARGV[ARGC++] = rootdir "altq/altq_cdnr.h" - ARGV[ARGC++] = rootdir "altq/altq_fifoq.h" - ARGV[ARGC++] = rootdir "altq/altq_hfsc.h" - ARGV[ARGC++] = rootdir "altq/altq_jobs.h" - ARGV[ARGC++] = rootdir "altq/altq_priq.h" - ARGV[ARGC++] = rootdir "altq/altq_red.h" - ARGV[ARGC++] = rootdir "altq/altq_rio.h" - ARGV[ARGC++] = rootdir "altq/altq_wfq.h" - ARGV[ARGC++] = rootdir "crypto/cryptodev.h" - ARGV[ARGC++] = rootdir "dev/apm/apmio.h" - ARGV[ARGC++] = rootdir "dev/dm/netbsd-dm.h" - ARGV[ARGC++] = rootdir "dev/dmover/dmover_io.h" - ARGV[ARGC++] = rootdir "dev/dtv/dtvio_demux.h" - ARGV[ARGC++] = rootdir "dev/dtv/dtvio_frontend.h" - ARGV[ARGC++] = rootdir "dev/filemon/filemon.h" - ARGV[ARGC++] = rootdir "dev/hdaudio/hdaudioio.h" - ARGV[ARGC++] = rootdir "dev/hdmicec/hdmicecio.h" - ARGV[ARGC++] = rootdir "dev/hpc/hpcfbio.h" - ARGV[ARGC++] = rootdir "dev/i2o/iopio.h" - ARGV[ARGC++] = rootdir "dev/ic/athioctl.h" - ARGV[ARGC++] = rootdir "dev/ic/bt8xx.h" - ARGV[ARGC++] = rootdir "dev/ic/hd44780var.h" - ARGV[ARGC++] = rootdir "dev/ic/icp_ioctl.h" - ARGV[ARGC++] = rootdir "dev/ic/isp_ioctl.h" - ARGV[ARGC++] = rootdir "dev/ic/mlxio.h" - ARGV[ARGC++] = rootdir "dev/ic/nvmeio.h" - ARGV[ARGC++] = rootdir "dev/ir/irdaio.h" - ARGV[ARGC++] = rootdir "dev/isa/satlinkio.h" - ARGV[ARGC++] = rootdir "dev/isa/isvio.h" - ARGV[ARGC++] = rootdir "dev/isa/wtreg.h" - ARGV[ARGC++] = rootdir "dev/iscsi/iscsi_ioctl.h" - ARGV[ARGC++] = rootdir "dev/ofw/openfirmio.h" - ARGV[ARGC++] = rootdir "dev/pci/amrio.h" - ARGV[ARGC++] = rootdir "dev/pci/mlyio.h" - ARGV[ARGC++] = rootdir "dev/pci/pciio.h" - ARGV[ARGC++] = rootdir "dev/pci/tweio.h" - ARGV[ARGC++] = rootdir "dev/pcmcia/if_cnwioctl.h" - ARGV[ARGC++] = rootdir "dev/pcmcia/if_rayreg.h" - ARGV[ARGC++] = rootdir "dev/raidframe/raidframeio.h" - ARGV[ARGC++] = rootdir "dev/sbus/mbppio.h" - ARGV[ARGC++] = rootdir "dev/scsipi/ses.h" - ARGV[ARGC++] = rootdir "dev/sun/disklabel.h" - ARGV[ARGC++] = rootdir "dev/sun/fbio.h" - ARGV[ARGC++] = rootdir "dev/sun/kbio.h" - ARGV[ARGC++] = rootdir "dev/sun/vuid_event.h" - ARGV[ARGC++] = rootdir "dev/tc/sticio.h" - ARGV[ARGC++] = rootdir "dev/usb/ukyopon.h" - ARGV[ARGC++] = rootdir "dev/usb/urio.h" - ARGV[ARGC++] = rootdir "dev/usb/usb.h" - ARGV[ARGC++] = rootdir "dev/usb/utoppy.h" - ARGV[ARGC++] = rootdir "dev/vme/xio.h" - ARGV[ARGC++] = rootdir "dev/wscons/wsdisplay_usl_io.h" - ARGV[ARGC++] = rootdir "dev/wscons/wsconsio.h" - ARGV[ARGC++] = rootdir "dev/biovar.h" - ARGV[ARGC++] = rootdir "dev/md.h" - ARGV[ARGC++] = rootdir 
"dev/ccdvar.h" - ARGV[ARGC++] = rootdir "dev/cgdvar.h" - ARGV[ARGC++] = rootdir "dev/fssvar.h" - ARGV[ARGC++] = rootdir "dev/bluetooth/btdev.h" - ARGV[ARGC++] = rootdir "dev/bluetooth/btsco.h" - ARGV[ARGC++] = rootdir "dev/kttcpio.h" - ARGV[ARGC++] = rootdir "dev/lockstat.h" - ARGV[ARGC++] = rootdir "dev/vndvar.h" - ARGV[ARGC++] = rootdir "dev/spkrio.h" - ARGV[ARGC++] = rootdir "net/bpf.h" - ARGV[ARGC++] = rootdir "net/if_atm.h" - ARGV[ARGC++] = rootdir "net/if_gre.h" - ARGV[ARGC++] = rootdir "net/if_ppp.h" - ARGV[ARGC++] = rootdir "net/npf.h" - ARGV[ARGC++] = rootdir "net/if_pppoe.h" - ARGV[ARGC++] = rootdir "net/if_sppp.h" - ARGV[ARGC++] = rootdir "net/if_srt.h" - ARGV[ARGC++] = rootdir "net/if_tap.h" - ARGV[ARGC++] = rootdir "net/if_tun.h" - ARGV[ARGC++] = rootdir "net/pfvar.h" - ARGV[ARGC++] = rootdir "net/slip.h" - ARGV[ARGC++] = rootdir "netbt/hci.h" - ARGV[ARGC++] = rootdir "netinet/ip_nat.h" - ARGV[ARGC++] = rootdir "netinet/ip_proxy.h" - ARGV[ARGC++] = rootdir "netinet6/in6_var.h" - ARGV[ARGC++] = rootdir "netnatm/natm.h" - ARGV[ARGC++] = rootdir "netsmb/smb_dev.h" - ARGV[ARGC++] = rootdir "sys/agpio.h" - ARGV[ARGC++] = rootdir "sys/audioio.h" - ARGV[ARGC++] = rootdir "sys/ataio.h" - ARGV[ARGC++] = rootdir "sys/cdio.h" - ARGV[ARGC++] = rootdir "sys/chio.h" - ARGV[ARGC++] = rootdir "sys/clockctl.h" - ARGV[ARGC++] = rootdir "sys/cpuio.h" - ARGV[ARGC++] = rootdir "sys/dkio.h" - ARGV[ARGC++] = rootdir "sys/drvctlio.h" - ARGV[ARGC++] = rootdir "sys/dvdio.h" - ARGV[ARGC++] = rootdir "sys/envsys.h" - ARGV[ARGC++] = rootdir "sys/event.h" - ARGV[ARGC++] = rootdir "sys/fdio.h" - ARGV[ARGC++] = rootdir "sys/filio.h" - ARGV[ARGC++] = rootdir "sys/gpio.h" - ARGV[ARGC++] = rootdir "sys/ioctl.h" - ARGV[ARGC++] = rootdir "sys/ioctl_compat.h" - ARGV[ARGC++] = rootdir "sys/joystick.h" - ARGV[ARGC++] = rootdir "sys/ksyms.h" - ARGV[ARGC++] = rootdir "sys/lua.h" - ARGV[ARGC++] = rootdir "sys/midiio.h" - ARGV[ARGC++] = rootdir "sys/mtio.h" - ARGV[ARGC++] = rootdir "sys/power.h" - ARGV[ARGC++] = rootdir "sys/radioio.h" - ARGV[ARGC++] = rootdir "sys/rndio.h" - ARGV[ARGC++] = rootdir "sys/scanio.h" - ARGV[ARGC++] = rootdir "sys/scsiio.h" - ARGV[ARGC++] = rootdir "sys/sockio.h" - ARGV[ARGC++] = rootdir "sys/timepps.h" - ARGV[ARGC++] = rootdir "sys/ttycom.h" - ARGV[ARGC++] = rootdir "sys/verified_exec.h" - ARGV[ARGC++] = rootdir "sys/videoio.h" - ARGV[ARGC++] = rootdir "sys/wdog.h" - ARGV[ARGC++] = rootdir "soundcard.h" - ARGV[ARGC++] = rootdir "xen/xenio.h" + # detect and register files to detect ioctl() definitions + ARGC = 1 + cmd = "find " rootdir " -type f -name '*.h'" + while (cmd | getline) { + ARGV[ARGC++] = $0 + } + close(cmd) ioctl_table_max = 0 } @@ -314,6 +194,7 @@ FNR == 1 { $0 ~ /PRIQ_IF_DETACH/ || $0 ~ /PRIQ_ENABLE/ || $0 ~ /WFQ_IF_ATTACH/ || + $0 ~ /POWER_IOC_GET_TYPE_WITH_LOSSAGE/ || $0 ~ /HFSC_DEL_FILTER/) { # There are entries with duplicate codes.. 
disable the less used ones next @@ -345,17 +226,12 @@ FNR == 1 { # This !NONE check allows to skip some unparsable entries if (ioctl_mode[ioctl_table_max] != "NONE") { - # special cases first - if ($0 ~ /POWER_IOC_GET_TYPE_WITH_LOSSAGE/) { - ioctl_type[ioctl_table_max] = "sizeof(uptr)" - } else { - n = split($0, a, ",") - if (n == 3) { - gsub(/^[ ]+/, "", a[3]) - match(a[3], /[a-zA-Z0-9_* ]+/) - type = get_type(substr(a[3], 0, RLENGTH)) - ioctl_type[ioctl_table_max] = type - } + n = split($0, a, ",") + if (n == 3) { + gsub(/^[ ]+/, "", a[3]) + match(a[3], /[a-zA-Z0-9_* ]+/) + type = get_type(substr(a[3], 0, RLENGTH)) + ioctl_type[ioctl_table_max] = type } } @@ -748,6 +624,8 @@ function get_type(string) return "struct_RF_SingleComponent_sz" } else if (string == "RF_ProgressInfo_t") { return "struct_RF_ProgressInfo_sz" + } else if (string == "nvlist_ref_t") { + return "struct_nvlist_ref_sz" } else { print "Unrecognized entry: " string print "Aborting" diff --git a/src/libcompiler_builtins/compiler-rt/utils/generate_netbsd_syscalls.awk b/src/libcompiler_builtins/compiler-rt/utils/generate_netbsd_syscalls.awk index 5e08900a17..ac08025460 100755 --- a/src/libcompiler_builtins/compiler-rt/utils/generate_netbsd_syscalls.awk +++ b/src/libcompiler_builtins/compiler-rt/utils/generate_netbsd_syscalls.awk @@ -1464,6 +1464,8 @@ function syscall_body(syscall, mode) pcmd(" }") pcmd("}") } + } else if (syscall == "getsockopt2") { + pcmd("/* TODO */") } else if (syscall == "fpathconf") { pcmd("/* Nothing to do */") } else if (syscall == "getrlimit") { @@ -1982,10 +1984,6 @@ function syscall_body(syscall, mode) pcmd("if (nsa_) {") pcmd(" PRE_READ(nsa_, sizeof(__sanitizer_sigaction));") pcmd("}") - } else if (syscall == "pmc_get_info") { - pcmd("/* TODO */") - } else if (syscall == "pmc_control") { - pcmd("/* TODO */") } else if (syscall == "rasctl") { pcmd("/* Nothing to do */") } else if (syscall == "kqueue") { @@ -2935,16 +2933,14 @@ function syscall_body(syscall, mode) } else if (syscall == "sendmmsg") { if (mode == "pre") { pcmd("struct __sanitizer_mmsghdr *mmsg = (struct __sanitizer_mmsghdr *)mmsg_;") - pcmd("unsigned int vlen = (vlen_ > 1024 ? 1024 : vlen_);") pcmd("if (mmsg) {") - pcmd(" PRE_READ(mmsg, sizeof(struct __sanitizer_mmsghdr) * vlen);") + pcmd(" PRE_READ(mmsg, sizeof(struct __sanitizer_mmsghdr) * (vlen_ > 1024 ? 1024 : vlen_));") pcmd("}") } else { pcmd("struct __sanitizer_mmsghdr *mmsg = (struct __sanitizer_mmsghdr *)mmsg_;") - pcmd("unsigned int vlen = (vlen_ > 1024 ? 1024 : vlen_);") pcmd("if (res >= 0) {") pcmd(" if (mmsg) {") - pcmd(" POST_READ(mmsg, sizeof(struct __sanitizer_mmsghdr) * vlen);") + pcmd(" POST_READ(mmsg, sizeof(struct __sanitizer_mmsghdr) * (vlen_ > 1024 ? 
1024 : vlen_));") pcmd(" }") pcmd("}") } diff --git a/src/libcompiler_builtins/crates/panic-handler/src/lib.rs b/src/libcompiler_builtins/crates/panic-handler/src/lib.rs index e970610873..a75999a4b6 100644 --- a/src/libcompiler_builtins/crates/panic-handler/src/lib.rs +++ b/src/libcompiler_builtins/crates/panic-handler/src/lib.rs @@ -1,6 +1,6 @@ // Hack of a crate until rust-lang/rust#51647 is fixed -#![feature(no_core, panic_handler)] +#![feature(no_core)] #![no_core] extern crate core; diff --git a/src/libcompiler_builtins/examples/intrinsics.rs b/src/libcompiler_builtins/examples/intrinsics.rs index a5be573944..1c498176c1 100644 --- a/src/libcompiler_builtins/examples/intrinsics.rs +++ b/src/libcompiler_builtins/examples/intrinsics.rs @@ -11,7 +11,6 @@ #![feature(lang_items)] #![feature(start)] #![feature(allocator_api)] -#![feature(panic_handler)] #![cfg_attr(windows, feature(panic_unwind))] #![no_std] diff --git a/src/libcompiler_builtins/libm/.travis.yml b/src/libcompiler_builtins/libm/.travis.yml index 922273e093..7583161781 100644 --- a/src/libcompiler_builtins/libm/.travis.yml +++ b/src/libcompiler_builtins/libm/.travis.yml @@ -5,21 +5,36 @@ sudo: required matrix: include: - env: TARGET=aarch64-unknown-linux-gnu - - env: TARGET=armv7-unknown-linux-gnueabihf - - env: TARGET=i686-unknown-linux-gnu - - env: TARGET=mips-unknown-linux-gnu - - env: TARGET=mips64-unknown-linux-gnuabi64 - - env: TARGET=mips64el-unknown-linux-gnuabi64 - - env: TARGET=mipsel-unknown-linux-gnu - - env: TARGET=powerpc-unknown-linux-gnu - - env: TARGET=powerpc64-unknown-linux-gnu - - env: TARGET=powerpc64le-unknown-linux-gnu - - env: TARGET=x86_64-unknown-linux-gnu - - env: TARGET=cargo-fmt - rust: beta - # no-panic link test + rust: nightly - env: TARGET=armv7-unknown-linux-gnueabihf rust: nightly + - env: TARGET=i686-unknown-linux-gnu + rust: nightly + - env: TARGET=mips-unknown-linux-gnu + rust: nightly + - env: TARGET=mips64-unknown-linux-gnuabi64 + rust: nightly + - env: TARGET=mips64el-unknown-linux-gnuabi64 + rust: nightly + - env: TARGET=mipsel-unknown-linux-gnu + rust: nightly + - env: TARGET=powerpc-unknown-linux-gnu + rust: nightly + - env: TARGET=powerpc64-unknown-linux-gnu + rust: nightly + - env: TARGET=powerpc64le-unknown-linux-gnu + rust: nightly + - env: TARGET=x86_64-unknown-linux-gnu + rust: nightly + - env: TARGET=cargo-fmt + rust: beta + + - env: TARGET=wasm32-unknown-unknown + rust: nightly + install: rustup target add $TARGET + script: + - cargo build --target $TARGET + - cargo build --no-default-features --target $TARGET before_install: set -e @@ -27,6 +42,7 @@ install: - bash ci/install.sh script: + - export PATH=$HOME/.local/bin:$PATH - bash ci/script.sh after_script: set +e diff --git a/src/libcompiler_builtins/libm/Cargo.toml b/src/libcompiler_builtins/libm/Cargo.toml index 8a2ba7447a..f28024d041 100644 --- a/src/libcompiler_builtins/libm/Cargo.toml +++ b/src/libcompiler_builtins/libm/Cargo.toml @@ -12,6 +12,17 @@ version = "0.1.2" [features] # only used to run our test suite checked = [] +default = ['stable'] +stable = [] [workspace] -members = ["cb", "test-generator"] \ No newline at end of file +members = [ + "cb", + "input-generator", + "musl-generator", + "newlib-generator", + "shared", +] + +[dev-dependencies] +shared = { path = "shared" } diff --git a/src/libcompiler_builtins/libm/ci/install.sh b/src/libcompiler_builtins/libm/ci/install.sh index 4d9552d231..af26e2d4c3 100644 --- a/src/libcompiler_builtins/libm/ci/install.sh +++ b/src/libcompiler_builtins/libm/ci/install.sh @@ 
-15,6 +15,11 @@ main() { if [ $TARGET != x86_64-unknown-linux-gnu ]; then rustup target add $TARGET fi + + mkdir -p ~/.local/bin + curl -L https://github.com/japaric/qemu-bin/raw/master/14.04/qemu-arm-2.12.0 > ~/.local/bin/qemu-arm + chmod +x ~/.local/bin/qemu-arm + qemu-arm --version } main diff --git a/src/libcompiler_builtins/libm/ci/script.sh b/src/libcompiler_builtins/libm/ci/script.sh index bb19e23d84..c3b6faa6ca 100644 --- a/src/libcompiler_builtins/libm/ci/script.sh +++ b/src/libcompiler_builtins/libm/ci/script.sh @@ -6,24 +6,27 @@ main() { return fi - # test that the functions don't contain invocations of `panic!` - if [ $TRAVIS_RUST_VERSION = nightly ]; then - cross build --release --target $TARGET --example no-panic - return - fi - # quick check cargo check # check that we can source import libm into compiler-builtins cargo check --package cb + # generate tests + cargo run -p input-generator --target x86_64-unknown-linux-musl + cargo run -p musl-generator --target x86_64-unknown-linux-musl + cargo run -p newlib-generator + + # test that the functions don't contain invocations of `panic!` + case $TARGET in + armv7-unknown-linux-gnueabihf) + cross build --release --target $TARGET --example no-panic + ;; + esac + # run unit tests cross test --lib --features checked --target $TARGET --release - # generate tests - cargo run --package test-generator --target x86_64-unknown-linux-musl - # run generated tests cross test --tests --features checked --target $TARGET --release diff --git a/src/libcompiler_builtins/libm/test-generator/Cargo.toml b/src/libcompiler_builtins/libm/input-generator/Cargo.toml similarity index 55% rename from src/libcompiler_builtins/libm/test-generator/Cargo.toml rename to src/libcompiler_builtins/libm/input-generator/Cargo.toml index b810d9daf6..fef2558a8b 100644 --- a/src/libcompiler_builtins/libm/test-generator/Cargo.toml +++ b/src/libcompiler_builtins/libm/input-generator/Cargo.toml @@ -1,9 +1,7 @@ [package] -name = "test-generator" +name = "input-generator" version = "0.1.0" authors = ["Jorge Aparicio "] -publish = false [dependencies] -rand = "0.5.3" -itertools = "0.7.8" +rand = "0.5.4" diff --git a/src/libcompiler_builtins/libm/input-generator/src/main.rs b/src/libcompiler_builtins/libm/input-generator/src/main.rs new file mode 100644 index 0000000000..b4a6ad1422 --- /dev/null +++ b/src/libcompiler_builtins/libm/input-generator/src/main.rs @@ -0,0 +1,189 @@ +extern crate rand; + +use std::collections::BTreeSet; +use std::error::Error; +use std::fs::{self, File}; +use std::io::Write; + +use rand::{RngCore, SeedableRng, XorShiftRng}; + +const NTESTS: usize = 10_000; + +fn main() -> Result<(), Box> { + let mut rng = XorShiftRng::from_rng(&mut rand::thread_rng())?; + + fs::remove_dir_all("bin").ok(); + fs::create_dir_all("bin/input")?; + fs::create_dir_all("bin/output")?; + + f32(&mut rng)?; + f32f32(&mut rng)?; + f32f32f32(&mut rng)?; + f32i16(&mut rng)?; + f64(&mut rng)?; + f64f64(&mut rng)?; + f64f64f64(&mut rng)?; + f64i16(&mut rng)?; + + Ok(()) +} + +fn f32(rng: &mut XorShiftRng) -> Result<(), Box> { + let mut set = BTreeSet::new(); + + while set.len() < NTESTS { + let f = f32::from_bits(rng.next_u32()); + + if f.is_nan() { + continue; + } + + set.insert(f.to_bits()); + } + + let mut f = File::create("bin/input/f32")?; + for i in set { + f.write_all(&i.to_bytes())?; + } + + Ok(()) +} + +fn f32f32(rng: &mut XorShiftRng) -> Result<(), Box> { + let mut f = File::create("bin/input/f32f32")?; + let mut i = 0; + while i < NTESTS { + let x0 = 
f32::from_bits(rng.next_u32()); + let x1 = f32::from_bits(rng.next_u32()); + + if x0.is_nan() || x1.is_nan() { + continue; + } + + i += 1; + f.write_all(&x0.to_bits().to_bytes())?; + f.write_all(&x1.to_bits().to_bytes())?; + } + + Ok(()) +} + +fn f32i16(rng: &mut XorShiftRng) -> Result<(), Box> { + let mut f = File::create("bin/input/f32i16")?; + let mut i = 0; + while i < NTESTS { + let x0 = f32::from_bits(rng.next_u32()); + let x1 = rng.next_u32() as i16; + + if x0.is_nan() { + continue; + } + + i += 1; + f.write_all(&x0.to_bits().to_bytes())?; + f.write_all(&x1.to_bytes())?; + } + + Ok(()) +} + +fn f32f32f32(rng: &mut XorShiftRng) -> Result<(), Box> { + let mut f = File::create("bin/input/f32f32f32")?; + let mut i = 0; + while i < NTESTS { + let x0 = f32::from_bits(rng.next_u32()); + let x1 = f32::from_bits(rng.next_u32()); + let x2 = f32::from_bits(rng.next_u32()); + + if x0.is_nan() || x1.is_nan() || x2.is_nan() { + continue; + } + + i += 1; + f.write_all(&x0.to_bits().to_bytes())?; + f.write_all(&x1.to_bits().to_bytes())?; + f.write_all(&x2.to_bits().to_bytes())?; + } + + Ok(()) +} + +fn f64(rng: &mut XorShiftRng) -> Result<(), Box> { + let mut set = BTreeSet::new(); + + while set.len() < NTESTS { + let f = f64::from_bits(rng.next_u64()); + + if f.is_nan() { + continue; + } + + set.insert(f.to_bits()); + } + + let mut f = File::create("bin/input/f64")?; + for i in set { + f.write_all(&i.to_bytes())?; + } + + Ok(()) +} + +fn f64f64(rng: &mut XorShiftRng) -> Result<(), Box> { + let mut f = File::create("bin/input/f64f64")?; + let mut i = 0; + while i < NTESTS { + let x0 = f64::from_bits(rng.next_u64()); + let x1 = f64::from_bits(rng.next_u64()); + + if x0.is_nan() || x1.is_nan() { + continue; + } + + i += 1; + f.write_all(&x0.to_bits().to_bytes())?; + f.write_all(&x1.to_bits().to_bytes())?; + } + + Ok(()) +} + +fn f64f64f64(rng: &mut XorShiftRng) -> Result<(), Box> { + let mut f = File::create("bin/input/f64f64f64")?; + let mut i = 0; + while i < NTESTS { + let x0 = f64::from_bits(rng.next_u64()); + let x1 = f64::from_bits(rng.next_u64()); + let x2 = f64::from_bits(rng.next_u64()); + + if x0.is_nan() || x1.is_nan() || x2.is_nan() { + continue; + } + + i += 1; + f.write_all(&x0.to_bits().to_bytes())?; + f.write_all(&x1.to_bits().to_bytes())?; + f.write_all(&x2.to_bits().to_bytes())?; + } + + Ok(()) +} + +fn f64i16(rng: &mut XorShiftRng) -> Result<(), Box> { + let mut f = File::create("bin/input/f64i16")?; + let mut i = 0; + while i < NTESTS { + let x0 = f64::from_bits(rng.next_u64()); + let x1 = rng.next_u32() as i16; + + if x0.is_nan() { + continue; + } + + i += 1; + f.write_all(&x0.to_bits().to_bytes())?; + f.write_all(&x1.to_bytes())?; + } + + Ok(()) +} diff --git a/src/libcompiler_builtins/libm/math/.cargo/config b/src/libcompiler_builtins/libm/math/.cargo/config new file mode 100644 index 0000000000..be79c453ad --- /dev/null +++ b/src/libcompiler_builtins/libm/math/.cargo/config @@ -0,0 +1,11 @@ +[target.thumbv7em-none-eabi] +rustflags = [ + "-C", "link-arg=-Wl,-Tlink.x", + "-C", "link-arg=-nostartfiles", + "-C", "link-arg=-mthumb", + "-C", "link-arg=-march=armv7e-m", + "-C", "link-arg=-mfloat-abi=soft", +] + +[build] +target = "thumbv7em-none-eabi" \ No newline at end of file diff --git a/src/libcompiler_builtins/libm/math/Cargo.toml b/src/libcompiler_builtins/libm/math/Cargo.toml new file mode 100644 index 0000000000..5bca038a9c --- /dev/null +++ b/src/libcompiler_builtins/libm/math/Cargo.toml @@ -0,0 +1,8 @@ +[package] +name = "math" +version = "0.0.0" + +[dependencies] 
+qemu-arm-rt = { git = "https://github.com/japaric/qemu-arm-rt" } + +[workspace] \ No newline at end of file diff --git a/src/libcompiler_builtins/libm/math/Cross.toml b/src/libcompiler_builtins/libm/math/Cross.toml new file mode 100644 index 0000000000..471770b528 --- /dev/null +++ b/src/libcompiler_builtins/libm/math/Cross.toml @@ -0,0 +1,2 @@ +[target.thumbv7em-none-eabi] +xargo = false \ No newline at end of file diff --git a/src/libcompiler_builtins/libm/musl-generator/Cargo.toml b/src/libcompiler_builtins/libm/musl-generator/Cargo.toml new file mode 100644 index 0000000000..0564f35361 --- /dev/null +++ b/src/libcompiler_builtins/libm/musl-generator/Cargo.toml @@ -0,0 +1,9 @@ +[package] +name = "musl-generator" +version = "0.1.0" +authors = ["Jorge Aparicio "] + +[dependencies] +lazy_static = "1.0.2" +shared = { path = "../shared" } +libm = { path = ".." } diff --git a/src/libcompiler_builtins/libm/musl-generator/src/macros.rs b/src/libcompiler_builtins/libm/musl-generator/src/macros.rs new file mode 100644 index 0000000000..16ba99d640 --- /dev/null +++ b/src/libcompiler_builtins/libm/musl-generator/src/macros.rs @@ -0,0 +1,191 @@ +macro_rules! f32 { + ($($fun:ident,)+) => {{ + $( + // check type signature + let _: fn(f32) -> f32 = libm::$fun; + let mut $fun = File::create(concat!("bin/output/musl.", stringify!($fun)))?; + )+ + + for x in shared::F32.iter() { + $( + let y = unsafe { + extern "C" { + fn $fun(_: f32) -> f32; + } + + $fun(*x) + }; + + $fun.write_all(&y.to_bits().to_bytes())?; + )+ + } + }}; +} + +macro_rules! f32f32 { + ($($fun:ident,)+) => {{ + $( + // check type signature + let _: fn(f32, f32) -> f32 = libm::$fun; + let mut $fun = File::create(concat!("bin/output/musl.", stringify!($fun)))?; + )+ + + for (x0, x1) in shared::F32F32.iter() { + $( + let y = unsafe { + extern "C" { + fn $fun(_: f32, _: f32) -> f32; + } + + $fun(*x0, *x1) + }; + + $fun.write_all(&y.to_bits().to_bytes())?; + )+ + } + }}; +} + +macro_rules! f32f32f32 { + ($($fun:ident,)+) => {{ + $( + // check type signature + let _: fn(f32, f32, f32) -> f32 = libm::$fun; + let mut $fun = File::create(concat!("bin/output/musl.", stringify!($fun)))?; + )+ + + for (x0, x1, x2) in shared::F32F32F32.iter() { + $( + let y = unsafe { + extern "C" { + fn $fun(_: f32, _: f32, _: f32) -> f32; + } + + $fun(*x0, *x1, *x2) + }; + + $fun.write_all(&y.to_bits().to_bytes())?; + )+ + } + }}; +} + +macro_rules! f32i32 { + ($($fun:ident,)+) => {{ + $( + // check type signature + let _: fn(f32, i32) -> f32 = libm::$fun; + let mut $fun = File::create(concat!("bin/output/musl.", stringify!($fun)))?; + )+ + + for (x0, x1) in shared::F32I32.iter() { + $( + let y = unsafe { + extern "C" { + fn $fun(_: f32, _: i32) -> f32; + } + + $fun(*x0, *x1 as i32) + }; + + $fun.write_all(&y.to_bits().to_bytes())?; + )+ + } + }}; +} + +macro_rules! f64 { + ($($fun:ident,)+) => {{ + $( + // check type signature + let _: fn(f64) -> f64 = libm::$fun; + let mut $fun = File::create(concat!("bin/output/musl.", stringify!($fun)))?; + )+ + + for x in shared::F64.iter() { + $( + let y = unsafe { + extern "C" { + fn $fun(_: f64) -> f64; + } + + $fun(*x) + }; + + $fun.write_all(&y.to_bits().to_bytes())?; + )+ + } + }}; +} + +macro_rules! 
f64f64 { + ($($fun:ident,)+) => {{ + $( + // check type signature + let _: fn(f64, f64) -> f64 = libm::$fun; + let mut $fun = File::create(concat!("bin/output/musl.", stringify!($fun)))?; + )+ + + for (x0, x1) in shared::F64F64.iter() { + $( + let y = unsafe { + extern "C" { + fn $fun(_: f64, _: f64) -> f64; + } + + $fun(*x0, *x1) + }; + + $fun.write_all(&y.to_bits().to_bytes())?; + )+ + } + }}; +} + +macro_rules! f64f64f64 { + ($($fun:ident,)+) => {{ + $( + // check type signature + let _: fn(f64, f64, f64) -> f64 = libm::$fun; + let mut $fun = File::create(concat!("bin/output/musl.", stringify!($fun)))?; + )+ + + for (x0, x1, x2) in shared::F64F64F64.iter() { + $( + let y = unsafe { + extern "C" { + fn $fun(_: f64, _: f64, _: f64) -> f64; + } + + $fun(*x0, *x1, *x2) + }; + + $fun.write_all(&y.to_bits().to_bytes())?; + )+ + } + }}; +} + +macro_rules! f64i32 { + ($($fun:ident,)+) => {{ + $( + // check type signature + let _: fn(f64, i32) -> f64 = libm::$fun; + let mut $fun = File::create(concat!("bin/output/musl.", stringify!($fun)))?; + )+ + + for (x0, x1) in shared::F64I32.iter() { + $( + let y = unsafe { + extern "C" { + fn $fun(_: f64, _: i32) -> f64; + } + + $fun(*x0, *x1 as i32) + }; + + $fun.write_all(&y.to_bits().to_bytes())?; + )+ + } + }}; +} diff --git a/src/libcompiler_builtins/libm/musl-generator/src/main.rs b/src/libcompiler_builtins/libm/musl-generator/src/main.rs new file mode 100644 index 0000000000..6e57e856dc --- /dev/null +++ b/src/libcompiler_builtins/libm/musl-generator/src/main.rs @@ -0,0 +1,97 @@ +extern crate libm; +extern crate shared; + +use std::error::Error; +use std::fs::File; +use std::io::Write; + +#[macro_use] +mod macros; + +fn main() -> Result<(), Box> { + f32! { + acosf, + asinf, + atanf, + cbrtf, + ceilf, + cosf, + coshf, + exp2f, + expf, + expm1f, + fabsf, + floorf, + log10f, + log1pf, + log2f, + logf, + roundf, + sinf, + sinhf, + sqrtf, + tanf, + tanhf, + truncf, + } + + f32f32! { + atan2f, + fdimf, + fmodf, + hypotf, + powf, + } + + f32i32! { + scalbnf, + } + + f32f32f32! { + fmaf, + } + + f64! { + acos, + asin, + atan, + cbrt, + ceil, + cos, + cosh, + exp, + exp2, + expm1, + fabs, + floor, + log, + log10, + log1p, + log2, + round, + sin, + sinh, + sqrt, + tan, + tanh, + trunc, + } + + f64f64! { + atan2, + fdim, + fmod, + hypot, + pow, + } + + f64i32! { + scalbn, + } + + f64f64f64! { + fma, + } + + Ok(()) +} diff --git a/src/libcompiler_builtins/libm/newlib-generator/Cargo.toml b/src/libcompiler_builtins/libm/newlib-generator/Cargo.toml new file mode 100644 index 0000000000..5766cb4b7a --- /dev/null +++ b/src/libcompiler_builtins/libm/newlib-generator/Cargo.toml @@ -0,0 +1,7 @@ +[package] +name = "newlib-generator" +version = "0.1.0" +authors = ["Jorge Aparicio "] + +[dependencies] +shared = { path = "../shared" } diff --git a/src/libcompiler_builtins/libm/newlib-generator/src/macros.rs b/src/libcompiler_builtins/libm/newlib-generator/src/macros.rs new file mode 100644 index 0000000000..84315a777b --- /dev/null +++ b/src/libcompiler_builtins/libm/newlib-generator/src/macros.rs @@ -0,0 +1,245 @@ +macro_rules! 
f32 { + ($($fun:ident,)+) => { + $( + let fun = stringify!($fun); + + fs::create_dir_all("math/src")?; + + let main = format!(" +#![no_main] +#![no_std] + +#[macro_use] +extern crate qemu_arm_rt as rt; + +use core::u32; + +use rt::{{io, process}}; + +entry!(main); + +fn main() {{ + run().unwrap_or_else(|e| {{ + eprintln!(\"error: {{}}\", e); + process::exit(1); + }}) +}} + +fn run() -> Result<(), usize> {{ + #[link(name = \"m\")] + extern \"C\" {{ + fn {0}(_: f32) -> f32; + }} + + let mut buf = [0; 4]; + while let Ok(()) = io::Stdin.read_exact(&mut buf) {{ + let x = f32::from_bits(u32::from_bytes(buf)); + let y = unsafe {{ {0}(x) }}; + + io::Stdout.write_all(&y.to_bits().to_bytes())?; + }} + + Ok(()) +}} + +#[no_mangle] +pub fn __errno() -> *mut i32 {{ + static mut ERRNO: i32 = 0; + unsafe {{ &mut ERRNO }} +}} +", fun); + + File::create("math/src/main.rs")?.write_all(main.as_bytes())?; + + assert!( + Command::new("cross") + .args(&["build", "--target", "thumbv7em-none-eabi", "--release"]) + .current_dir("math") + .status()? + .success() + ); + + let mut qemu = Command::new("qemu-arm") + .arg("math/target/thumbv7em-none-eabi/release/math") + .stdin(Stdio::piped()) + .stdout(Stdio::piped()) + .spawn()?; + + qemu.stdin.as_mut().take().unwrap().write_all(F32)?; + + let output = qemu.wait_with_output()?; + + File::create(concat!("bin/output/newlib.", stringify!($fun)))? + .write_all(&output.stdout)?; + )+ + } +} + +macro_rules! f32f32 { + ($($fun:ident,)+) => { + $( + let fun = stringify!($fun); + + fs::create_dir_all("math/src")?; + + let main = format!(" +#![no_main] +#![no_std] + +#[macro_use] +extern crate qemu_arm_rt as rt; + +use core::u32; + +use rt::{{io, process}}; + +entry!(main); + +fn main() {{ + run().unwrap_or_else(|e| {{ + eprintln!(\"error: {{}}\", e); + process::exit(1); + }}) +}} + +fn run() -> Result<(), usize> {{ + #[link(name = \"m\")] + extern \"C\" {{ + fn {0}(_: f32, _: f32) -> f32; + }} + + let mut chunk = [0; 8]; + while let Ok(()) = io::Stdin.read_exact(&mut chunk) {{ + let mut buf = [0; 4]; + buf.copy_from_slice(&chunk[..4]); + let x0 = f32::from_bits(u32::from_bytes(buf)); + + buf.copy_from_slice(&chunk[4..]); + let x1 = f32::from_bits(u32::from_bytes(buf)); + + let y = unsafe {{ {0}(x0, x1) }}; + + io::Stdout.write_all(&y.to_bits().to_bytes())?; + }} + + Ok(()) +}} + +#[no_mangle] +pub fn __errno() -> *mut i32 {{ + static mut ERRNO: i32 = 0; + unsafe {{ &mut ERRNO }} +}} +", fun); + + File::create("math/src/main.rs")?.write_all(main.as_bytes())?; + + assert!( + Command::new("cross") + .args(&["build", "--target", "thumbv7em-none-eabi", "--release"]) + .current_dir("math") + .status()? + .success() + ); + + let mut qemu = Command::new("qemu-arm") + .arg("math/target/thumbv7em-none-eabi/release/math") + .stdin(Stdio::piped()) + .stdout(Stdio::piped()) + .spawn()?; + + qemu.stdin.as_mut().take().unwrap().write_all(F32)?; + + let output = qemu.wait_with_output()?; + + File::create(concat!("bin/output/newlib.", stringify!($fun)))? + .write_all(&output.stdout)?; + )+ + } +} + +macro_rules! 
f32f32f32 { + ($($fun:ident,)+) => { + $( + let fun = stringify!($fun); + + fs::create_dir_all("math/src")?; + + let main = format!(" +#![no_main] +#![no_std] + +#[macro_use] +extern crate qemu_arm_rt as rt; + +use core::u32; + +use rt::{{io, process}}; + +entry!(main); + +fn main() {{ + run().unwrap_or_else(|e| {{ + eprintln!(\"error: {{}}\", e); + process::exit(1); + }}) +}} + +fn run() -> Result<(), usize> {{ + #[link(name = \"m\")] + extern \"C\" {{ + fn {0}(_: f32, _: f32, _: f32) -> f32; + }} + + let mut chunk = [0; 12]; + while let Ok(()) = io::Stdin.read_exact(&mut chunk) {{ + let mut buf = [0; 4]; + buf.copy_from_slice(&chunk[..4]); + let x0 = f32::from_bits(u32::from_bytes(buf)); + + buf.copy_from_slice(&chunk[4..8]); + let x1 = f32::from_bits(u32::from_bytes(buf)); + + buf.copy_from_slice(&chunk[8..]); + let x2 = f32::from_bits(u32::from_bytes(buf)); + + let y = unsafe {{ {0}(x0, x1, x2) }}; + + io::Stdout.write_all(&y.to_bits().to_bytes())?; + }} + + Ok(()) +}} + +#[no_mangle] +pub fn __errno() -> *mut i32 {{ + static mut ERRNO: i32 = 0; + unsafe {{ &mut ERRNO }} +}} +", fun); + + File::create("math/src/main.rs")?.write_all(main.as_bytes())?; + + assert!( + Command::new("cross") + .args(&["build", "--target", "thumbv7em-none-eabi", "--release"]) + .current_dir("math") + .status()? + .success() + ); + + let mut qemu = Command::new("qemu-arm") + .arg("math/target/thumbv7em-none-eabi/release/math") + .stdin(Stdio::piped()) + .stdout(Stdio::piped()) + .spawn()?; + + qemu.stdin.as_mut().take().unwrap().write_all(F32)?; + + let output = qemu.wait_with_output()?; + + File::create(concat!("bin/output/newlib.", stringify!($fun)))? + .write_all(&output.stdout)?; + )+ + } +} diff --git a/src/libcompiler_builtins/libm/newlib-generator/src/main.rs b/src/libcompiler_builtins/libm/newlib-generator/src/main.rs new file mode 100644 index 0000000000..52a97cabbf --- /dev/null +++ b/src/libcompiler_builtins/libm/newlib-generator/src/main.rs @@ -0,0 +1,32 @@ +extern crate shared; + +use std::error::Error; +use std::fs::{self, File}; +use std::io::Write; +use std::process::{Command, Stdio}; + +#[macro_use] +mod macros; + +fn main() -> Result<(), Box> { + const F32: &[u8] = include_bytes!("../../bin/input/f32"); + + f32! { + asinf, + cbrtf, + cosf, + exp2f, + sinf, + tanf, + } + + f32f32! { + hypotf, + } + + f32f32f32! { + fmaf, + } + + Ok(()) +} diff --git a/src/libcompiler_builtins/libm/shared/Cargo.toml b/src/libcompiler_builtins/libm/shared/Cargo.toml new file mode 100644 index 0000000000..d778237810 --- /dev/null +++ b/src/libcompiler_builtins/libm/shared/Cargo.toml @@ -0,0 +1,7 @@ +[package] +name = "shared" +version = "0.1.0" +authors = ["Jorge Aparicio "] + +[dependencies] +lazy_static = "1.0.2" diff --git a/src/libcompiler_builtins/libm/shared/src/lib.rs b/src/libcompiler_builtins/libm/shared/src/lib.rs new file mode 100644 index 0000000000..84676f94f7 --- /dev/null +++ b/src/libcompiler_builtins/libm/shared/src/lib.rs @@ -0,0 +1,471 @@ +#![feature(exact_chunks)] + +#[macro_use] +extern crate lazy_static; + +lazy_static! 
{ + pub static ref F32: Vec = { + let bytes = include_bytes!("../../bin/input/f32"); + + bytes + .exact_chunks(4) + .map(|chunk| { + let mut buf = [0; 4]; + buf.copy_from_slice(chunk); + f32::from_bits(u32::from_le(u32::from_bytes(buf))) + }) + .collect() + }; + pub static ref F32F32: Vec<(f32, f32)> = { + let bytes = include_bytes!("../../bin/input/f32f32"); + + bytes + .exact_chunks(8) + .map(|chunk| { + let mut x0 = [0; 4]; + let mut x1 = [0; 4]; + x0.copy_from_slice(&chunk[..4]); + x1.copy_from_slice(&chunk[4..]); + + ( + f32::from_bits(u32::from_le(u32::from_bytes(x0))), + f32::from_bits(u32::from_le(u32::from_bytes(x1))), + ) + }) + .collect() + }; + pub static ref F32F32F32: Vec<(f32, f32, f32)> = { + let bytes = include_bytes!("../../bin/input/f32f32f32"); + + bytes + .exact_chunks(12) + .map(|chunk| { + let mut x0 = [0; 4]; + let mut x1 = [0; 4]; + let mut x2 = [0; 4]; + x0.copy_from_slice(&chunk[..4]); + x1.copy_from_slice(&chunk[4..8]); + x2.copy_from_slice(&chunk[8..]); + + ( + f32::from_bits(u32::from_le(u32::from_bytes(x0))), + f32::from_bits(u32::from_le(u32::from_bytes(x1))), + f32::from_bits(u32::from_le(u32::from_bytes(x2))), + ) + }) + .collect() + }; + pub static ref F32I32: Vec<(f32, i32)> = { + let bytes = include_bytes!("../../bin/input/f32i16"); + + bytes + .exact_chunks(6) + .map(|chunk| { + let mut x0 = [0; 4]; + let mut x1 = [0; 2]; + x0.copy_from_slice(&chunk[..4]); + x1.copy_from_slice(&chunk[4..]); + + ( + f32::from_bits(u32::from_le(u32::from_bytes(x0))), + i16::from_le(i16::from_bytes(x1)) as i32, + ) + }) + .collect() + }; + pub static ref F64: Vec = { + let bytes = include_bytes!("../../bin/input/f64"); + + bytes + .exact_chunks(8) + .map(|chunk| { + let mut buf = [0; 8]; + buf.copy_from_slice(chunk); + f64::from_bits(u64::from_le(u64::from_bytes(buf))) + }) + .collect() + }; + pub static ref F64F64: Vec<(f64, f64)> = { + let bytes = include_bytes!("../../bin/input/f64f64"); + + bytes + .exact_chunks(16) + .map(|chunk| { + let mut x0 = [0; 8]; + let mut x1 = [0; 8]; + x0.copy_from_slice(&chunk[..8]); + x1.copy_from_slice(&chunk[8..]); + + ( + f64::from_bits(u64::from_le(u64::from_bytes(x0))), + f64::from_bits(u64::from_le(u64::from_bytes(x1))), + ) + }) + .collect() + }; + pub static ref F64F64F64: Vec<(f64, f64, f64)> = { + let bytes = include_bytes!("../../bin/input/f64f64f64"); + + bytes + .exact_chunks(24) + .map(|chunk| { + let mut x0 = [0; 8]; + let mut x1 = [0; 8]; + let mut x2 = [0; 8]; + x0.copy_from_slice(&chunk[..8]); + x1.copy_from_slice(&chunk[8..16]); + x2.copy_from_slice(&chunk[16..]); + + ( + f64::from_bits(u64::from_le(u64::from_bytes(x0))), + f64::from_bits(u64::from_le(u64::from_bytes(x1))), + f64::from_bits(u64::from_le(u64::from_bytes(x2))), + ) + }) + .collect() + }; + pub static ref F64I32: Vec<(f64, i32)> = { + let bytes = include_bytes!("../../bin/input/f64i16"); + + bytes + .exact_chunks(10) + .map(|chunk| { + let mut x0 = [0; 8]; + let mut x1 = [0; 2]; + x0.copy_from_slice(&chunk[..8]); + x1.copy_from_slice(&chunk[8..]); + + ( + f64::from_bits(u64::from_le(u64::from_bytes(x0))), + i16::from_le(i16::from_bytes(x1)) as i32, + ) + }) + .collect() + }; +} + +#[macro_export] +macro_rules! 
f32 { + ($lib:expr, $($fun:ident),+) => { + $( + #[test] + fn $fun() { + let expected = include_bytes!(concat!("../bin/output/", $lib, ".", stringify!($fun))) + .exact_chunks(4) + .map(|chunk| { + let mut buf = [0; 4]; + buf.copy_from_slice(chunk); + f32::from_bits(u32::from_le(u32::from_bytes(buf))) + }) + .collect::>(); + + for (input, expected) in $crate::F32.iter().zip(&expected) { + if let Ok(output) = panic::catch_unwind(|| libm::$fun(*input)) { + if let Err(error) = libm::_eqf(output, *expected) { + panic!( + "INPUT: {:#x}, OUTPUT: {:#x}, EXPECTED: {:#x}, ERROR: {}", + input.to_bits(), + output.to_bits(), + expected.to_bits(), + error + ); + } + } else { + panic!( + "INPUT: {:#x}, OUTPUT: PANIC!, EXPECTED: {:#x}", + input.to_bits(), + expected.to_bits() + ); + } + } + } + )+ + } +} + +#[macro_export] +macro_rules! f32f32 { + ($lib:expr, $($fun:ident),+) => { + $( + #[test] + fn $fun() { + let expected = include_bytes!(concat!("../bin/output/", $lib, ".", stringify!($fun))) + .exact_chunks(4) + .map(|chunk| { + let mut buf = [0; 4]; + buf.copy_from_slice(chunk); + f32::from_bits(u32::from_le(u32::from_bytes(buf))) + }) + .collect::>(); + + for ((i0, i1), expected) in $crate::F32F32.iter().zip(&expected) { + if let Ok(output) = panic::catch_unwind(|| libm::$fun(*i0, *i1)) { + if let Err(error) = libm::_eqf(output, *expected) { + panic!( + "INPUT: ({:#x}, {:#x}), OUTPUT: {:#x}, EXPECTED: {:#x}, ERROR: {}", + i0.to_bits(), + i1.to_bits(), + output.to_bits(), + expected.to_bits(), + error + ); + } + } else { + panic!( + "INPUT: ({:#x}, {:#x}), OUTPUT: PANIC!, EXPECTED: {:#x}", + i0.to_bits(), + i1.to_bits(), + expected.to_bits() + ); + } + } + } + )+ + } +} + +#[macro_export] +macro_rules! f32f32f32 { + ($lib:expr, $($fun:ident),+) => { + $( + #[test] + fn $fun() { + let expected = include_bytes!(concat!("../bin/output/", $lib, ".", stringify!($fun))) + .exact_chunks(4) + .map(|chunk| { + let mut buf = [0; 4]; + buf.copy_from_slice(chunk); + f32::from_bits(u32::from_le(u32::from_bytes(buf))) + }) + .collect::>(); + + for ((i0, i1, i2), expected) in $crate::F32F32F32.iter().zip(&expected) { + if let Ok(output) = panic::catch_unwind(|| libm::$fun(*i0, *i1, *i2)) { + if let Err(error) = libm::_eqf(output, *expected) { + panic!( + "INPUT: ({:#x}, {:#x}, {:#x}), OUTPUT: {:#x}, EXPECTED: {:#x}, ERROR: {}", + i0.to_bits(), + i1.to_bits(), + i2.to_bits(), + output.to_bits(), + expected.to_bits(), + error + ); + } + } else { + panic!( + "INPUT: ({:#x}, {:#x}), OUTPUT: PANIC!, EXPECTED: {:#x}", + i0.to_bits(), + i1.to_bits(), + expected.to_bits() + ); + } + } + } + )+ + } +} + +#[macro_export] +macro_rules! f32i32 { + ($lib:expr, $($fun:ident),+) => { + $( + #[test] + fn $fun() { + let expected = include_bytes!(concat!("../bin/output/", $lib, ".", stringify!($fun))) + .exact_chunks(4) + .map(|chunk| { + let mut buf = [0; 4]; + buf.copy_from_slice(chunk); + f32::from_bits(u32::from_le(u32::from_bytes(buf))) + }) + .collect::>(); + + for ((i0, i1), expected) in $crate::F32I32.iter().zip(&expected) { + if let Ok(output) = panic::catch_unwind(|| libm::$fun(*i0, *i1)) { + if let Err(error) = libm::_eqf(output, *expected) { + panic!( + "INPUT: ({:#x}, {:#x}), OUTPUT: {:#x}, EXPECTED: {:#x}, ERROR: {}", + i0.to_bits(), + i1, + output.to_bits(), + expected.to_bits(), + error + ); + } + } else { + panic!( + "INPUT: ({:#x}, {:#x}), OUTPUT: PANIC!, EXPECTED: {:#x}", + i0.to_bits(), + i1, + expected.to_bits() + ); + } + } + } + )+ + } +} + +#[macro_export] +macro_rules! 
f64 { + ($lib:expr, $($fun:ident),+) => { + $( + #[test] + fn $fun() { + let expected = include_bytes!(concat!("../bin/output/", $lib, ".", stringify!($fun))) + .exact_chunks(8) + .map(|chunk| { + let mut buf = [0; 8]; + buf.copy_from_slice(chunk); + f64::from_bits(u64::from_le(u64::from_bytes(buf))) + }) + .collect::>(); + + for (input, expected) in shared::F64.iter().zip(&expected) { + if let Ok(output) = panic::catch_unwind(|| libm::$fun(*input)) { + if let Err(error) = libm::_eq(output, *expected) { + panic!( + "INPUT: {:#x}, OUTPUT: {:#x}, EXPECTED: {:#x}, ERROR: {}", + input.to_bits(), + output.to_bits(), + expected.to_bits(), + error + ); + } + } else { + panic!( + "INPUT: {:#x}, OUTPUT: PANIC!, EXPECTED: {:#x}", + input.to_bits(), + expected.to_bits() + ); + } + } + } + )+ + } +} + +#[macro_export] +macro_rules! f64f64 { + ($lib:expr, $($fun:ident),+) => { + $( + #[test] + fn $fun() { + let expected = include_bytes!(concat!("../bin/output/", $lib, ".", stringify!($fun))) + .exact_chunks(8) + .map(|chunk| { + let mut buf = [0; 8]; + buf.copy_from_slice(chunk); + f64::from_bits(u64::from_le(u64::from_bytes(buf))) + }) + .collect::>(); + + for ((i0, i1), expected) in shared::F64F64.iter().zip(&expected) { + if let Ok(output) = panic::catch_unwind(|| libm::$fun(*i0, *i1)) { + if let Err(error) = libm::_eq(output, *expected) { + panic!( + "INPUT: ({:#x}, {:#x}), OUTPUT: {:#x}, EXPECTED: {:#x}, ERROR: {}", + i0.to_bits(), + i1.to_bits(), + output.to_bits(), + expected.to_bits(), + error + ); + } + } else { + panic!( + "INPUT: ({:#x}, {:#x}), OUTPUT: PANIC!, EXPECTED: {:#x}", + i0.to_bits(), + i1.to_bits(), + expected.to_bits() + ); + } + } + } + )+ + } +} + +#[macro_export] +macro_rules! f64f64f64 { + ($lib:expr, $($fun:ident),+) => { + $( + #[test] + fn $fun() { + let expected = include_bytes!(concat!("../bin/output/", $lib, ".", stringify!($fun))) + .exact_chunks(8) + .map(|chunk| { + let mut buf = [0; 8]; + buf.copy_from_slice(chunk); + f64::from_bits(u64::from_le(u64::from_bytes(buf))) + }) + .collect::>(); + + for ((i0, i1, i2), expected) in shared::F64F64F64.iter().zip(&expected) { + if let Ok(output) = panic::catch_unwind(|| libm::$fun(*i0, *i1, *i2)) { + if let Err(error) = libm::_eq(output, *expected) { + panic!( + "INPUT: ({:#x}, {:#x}, {:#x}), OUTPUT: {:#x}, EXPECTED: {:#x}, ERROR: {}", + i0.to_bits(), + i1.to_bits(), + i2.to_bits(), + output.to_bits(), + expected.to_bits(), + error + ); + } + } else { + panic!( + "INPUT: ({:#x}, {:#x}), OUTPUT: PANIC!, EXPECTED: {:#x}", + i0.to_bits(), + i1.to_bits(), + expected.to_bits() + ); + } + } + } + )+ + } +} + +#[macro_export] +macro_rules! 
f64i32 { + ($lib:expr, $($fun:ident),+) => { + $( + #[test] + fn $fun() { + let expected = include_bytes!(concat!("../bin/output/", $lib, ".", stringify!($fun))) + .exact_chunks(8) + .map(|chunk| { + let mut buf = [0; 8]; + buf.copy_from_slice(chunk); + f64::from_bits(u64::from_le(u64::from_bytes(buf))) + }) + .collect::>(); + + for ((i0, i1), expected) in shared::F64I32.iter().zip(&expected) { + if let Ok(output) = panic::catch_unwind(|| libm::$fun(*i0, *i1)) { + if let Err(error) = libm::_eq(output, *expected) { + panic!( + "INPUT: ({:#x}, {:#x}), OUTPUT: {:#x}, EXPECTED: {:#x}, ERROR: {}", + i0.to_bits(), + i1, + output.to_bits(), + expected.to_bits(), + error + ); + } + } else { + panic!( + "INPUT: ({:#x}, {:#x}), OUTPUT: PANIC!, EXPECTED: {:#x}", + i0.to_bits(), + i1, + expected.to_bits() + ); + } + } + } + )+ + } +} diff --git a/src/libcompiler_builtins/libm/src/lib.rs b/src/libcompiler_builtins/libm/src/lib.rs index 5121cbfbac..6be4587281 100644 --- a/src/libcompiler_builtins/libm/src/lib.rs +++ b/src/libcompiler_builtins/libm/src/lib.rs @@ -11,6 +11,10 @@ #![deny(warnings)] #![no_std] +#![cfg_attr( + all(target_arch = "wasm32", not(feature = "stable")), + feature(core_intrinsics) +)] mod math; @@ -21,14 +25,34 @@ pub use math::*; /// Approximate equality with 1 ULP of tolerance #[doc(hidden)] #[inline] -pub fn _eqf(a: u32, b: u32) -> bool { - (a as i32).wrapping_sub(b as i32).abs() <= 1 +pub fn _eqf(a: f32, b: f32) -> Result<(), u32> { + if a.is_nan() && b.is_nan() { + Ok(()) + } else { + let err = (a.to_bits() as i32).wrapping_sub(b.to_bits() as i32).abs(); + + if err <= 1 { + Ok(()) + } else { + Err(err as u32) + } + } } #[doc(hidden)] #[inline] -pub fn _eq(a: u64, b: u64) -> bool { - (a as i64).wrapping_sub(b as i64).abs() <= 1 +pub fn _eq(a: f64, b: f64) -> Result<(), u64> { + if a.is_nan() && b.is_nan() { + Ok(()) + } else { + let err = (a.to_bits() as i64).wrapping_sub(b.to_bits() as i64).abs(); + + if err <= 1 { + Ok(()) + } else { + Err(err as u64) + } + } } /// Math support for `f32` diff --git a/src/libcompiler_builtins/libm/src/math/ceil.rs b/src/libcompiler_builtins/libm/src/math/ceil.rs index 4db2ca8403..5dbfa6a2c0 100644 --- a/src/libcompiler_builtins/libm/src/math/ceil.rs +++ b/src/libcompiler_builtins/libm/src/math/ceil.rs @@ -4,6 +4,14 @@ const TOINT: f64 = 1. / f64::EPSILON; #[inline] pub fn ceil(x: f64) -> f64 { + // On wasm32 we know that LLVM's intrinsic will compile to an optimized + // `f64.ceil` native instruction, so we can leverage this for both code size + // and speed. + llvm_intrinsically_optimized! { + #[cfg(target_arch = "wasm32")] { + return unsafe { ::core::intrinsics::ceilf64(x) } + } + } let u: u64 = x.to_bits(); let e: i64 = (u >> 52 & 0x7ff) as i64; let y: f64; diff --git a/src/libcompiler_builtins/libm/src/math/ceilf.rs b/src/libcompiler_builtins/libm/src/math/ceilf.rs index 16bffb3002..c8cd4b5aa5 100644 --- a/src/libcompiler_builtins/libm/src/math/ceilf.rs +++ b/src/libcompiler_builtins/libm/src/math/ceilf.rs @@ -2,6 +2,14 @@ use core::f32; #[inline] pub fn ceilf(x: f32) -> f32 { + // On wasm32 we know that LLVM's intrinsic will compile to an optimized + // `f32.ceil` native instruction, so we can leverage this for both code size + // and speed. + llvm_intrinsically_optimized! 
{ + #[cfg(target_arch = "wasm32")] { + return unsafe { ::core::intrinsics::ceilf32(x) } + } + } let mut ui = x.to_bits(); let e = (((ui >> 23) & 0xff) - 0x7f) as i32; diff --git a/src/libcompiler_builtins/libm/src/math/fabs.rs b/src/libcompiler_builtins/libm/src/math/fabs.rs index 9e081f3f9f..7c804653c9 100644 --- a/src/libcompiler_builtins/libm/src/math/fabs.rs +++ b/src/libcompiler_builtins/libm/src/math/fabs.rs @@ -2,5 +2,13 @@ use core::u64; #[inline] pub fn fabs(x: f64) -> f64 { + // On wasm32 we know that LLVM's intrinsic will compile to an optimized + // `f64.abs` native instruction, so we can leverage this for both code size + // and speed. + llvm_intrinsically_optimized! { + #[cfg(target_arch = "wasm32")] { + return unsafe { ::core::intrinsics::fabsf64(x) } + } + } f64::from_bits(x.to_bits() & (u64::MAX / 2)) } diff --git a/src/libcompiler_builtins/libm/src/math/fabsf.rs b/src/libcompiler_builtins/libm/src/math/fabsf.rs index 4cc9411169..884c20f6c4 100644 --- a/src/libcompiler_builtins/libm/src/math/fabsf.rs +++ b/src/libcompiler_builtins/libm/src/math/fabsf.rs @@ -1,4 +1,12 @@ #[inline] pub fn fabsf(x: f32) -> f32 { + // On wasm32 we know that LLVM's intrinsic will compile to an optimized + // `f32.abs` native instruction, so we can leverage this for both code size + // and speed. + llvm_intrinsically_optimized! { + #[cfg(target_arch = "wasm32")] { + return unsafe { ::core::intrinsics::fabsf32(x) } + } + } f32::from_bits(x.to_bits() & 0x7fffffff) } diff --git a/src/libcompiler_builtins/libm/src/math/floor.rs b/src/libcompiler_builtins/libm/src/math/floor.rs index 997865d39e..b14a48d55b 100644 --- a/src/libcompiler_builtins/libm/src/math/floor.rs +++ b/src/libcompiler_builtins/libm/src/math/floor.rs @@ -4,6 +4,14 @@ const TOINT: f64 = 1. / f64::EPSILON; #[inline] pub fn floor(x: f64) -> f64 { + // On wasm32 we know that LLVM's intrinsic will compile to an optimized + // `f64.floor` native instruction, so we can leverage this for both code size + // and speed. + llvm_intrinsically_optimized! { + #[cfg(target_arch = "wasm32")] { + return unsafe { ::core::intrinsics::floorf64(x) } + } + } let ui = x.to_bits(); let e = ((ui >> 52) & 0x7ff) as i32; diff --git a/src/libcompiler_builtins/libm/src/math/floorf.rs b/src/libcompiler_builtins/libm/src/math/floorf.rs index 9c263b5182..71b5953df3 100644 --- a/src/libcompiler_builtins/libm/src/math/floorf.rs +++ b/src/libcompiler_builtins/libm/src/math/floorf.rs @@ -2,6 +2,14 @@ use core::f32; #[inline] pub fn floorf(x: f32) -> f32 { + // On wasm32 we know that LLVM's intrinsic will compile to an optimized + // `f32.floor` native instruction, so we can leverage this for both code size + // and speed. + llvm_intrinsically_optimized! { + #[cfg(target_arch = "wasm32")] { + return unsafe { ::core::intrinsics::floorf32(x) } + } + } let mut ui = x.to_bits(); let e = (((ui >> 23) & 0xff) - 0x7f) as i32; diff --git a/src/libcompiler_builtins/libm/src/math/mod.rs b/src/libcompiler_builtins/libm/src/math/mod.rs index da34fb4cec..e51b1511df 100644 --- a/src/libcompiler_builtins/libm/src/math/mod.rs +++ b/src/libcompiler_builtins/libm/src/math/mod.rs @@ -58,6 +58,17 @@ macro_rules! i { }; } +macro_rules! 
llvm_intrinsically_optimized { + (#[cfg($($clause:tt)*)] $e:expr) => { + #[cfg(all(not(feature = "stable"), $($clause)*))] + { + if true { // thwart the dead code lint + $e + } + } + }; +} + // Public modules mod acos; mod acosf; diff --git a/src/libcompiler_builtins/libm/src/math/sqrt.rs b/src/libcompiler_builtins/libm/src/math/sqrt.rs index cbadb49bba..b2387a26e7 100644 --- a/src/libcompiler_builtins/libm/src/math/sqrt.rs +++ b/src/libcompiler_builtins/libm/src/math/sqrt.rs @@ -82,6 +82,18 @@ const TINY: f64 = 1.0e-300; #[inline] pub fn sqrt(x: f64) -> f64 { + // On wasm32 we know that LLVM's intrinsic will compile to an optimized + // `f64.sqrt` native instruction, so we can leverage this for both code size + // and speed. + llvm_intrinsically_optimized! { + #[cfg(target_arch = "wasm32")] { + return if x < 0.0 { + f64::NAN + } else { + unsafe { ::core::intrinsics::sqrtf64(x) } + } + } + } let mut z: f64; let sign: u32 = 0x80000000; let mut ix0: i32; diff --git a/src/libcompiler_builtins/libm/src/math/sqrtf.rs b/src/libcompiler_builtins/libm/src/math/sqrtf.rs index 49984689ef..33cafbcbda 100644 --- a/src/libcompiler_builtins/libm/src/math/sqrtf.rs +++ b/src/libcompiler_builtins/libm/src/math/sqrtf.rs @@ -17,6 +17,18 @@ const TINY: f32 = 1.0e-30; #[inline] pub fn sqrtf(x: f32) -> f32 { + // On wasm32 we know that LLVM's intrinsic will compile to an optimized + // `f32.sqrt` native instruction, so we can leverage this for both code size + // and speed. + llvm_intrinsically_optimized! { + #[cfg(target_arch = "wasm32")] { + return if x < 0.0 { + ::core::f32::NAN + } else { + unsafe { ::core::intrinsics::sqrtf32(x) } + } + } + } let mut z: f32; let sign: i32 = 0x80000000u32 as i32; let mut ix: i32; diff --git a/src/libcompiler_builtins/libm/src/math/trunc.rs b/src/libcompiler_builtins/libm/src/math/trunc.rs index 6bea67cbc1..8eecfcf538 100644 --- a/src/libcompiler_builtins/libm/src/math/trunc.rs +++ b/src/libcompiler_builtins/libm/src/math/trunc.rs @@ -2,6 +2,14 @@ use core::f64; #[inline] pub fn trunc(x: f64) -> f64 { + // On wasm32 we know that LLVM's intrinsic will compile to an optimized + // `f64.trunc` native instruction, so we can leverage this for both code size + // and speed. + llvm_intrinsically_optimized! { + #[cfg(target_arch = "wasm32")] { + return unsafe { ::core::intrinsics::truncf64(x) } + } + } let x1p120 = f64::from_bits(0x4770000000000000); // 0x1p120f === 2 ^ 120 let mut i: u64 = x.to_bits(); diff --git a/src/libcompiler_builtins/libm/src/math/truncf.rs b/src/libcompiler_builtins/libm/src/math/truncf.rs index 9d42620d96..0d74fea9c9 100644 --- a/src/libcompiler_builtins/libm/src/math/truncf.rs +++ b/src/libcompiler_builtins/libm/src/math/truncf.rs @@ -2,6 +2,14 @@ use core::f32; #[inline] pub fn truncf(x: f32) -> f32 { + // On wasm32 we know that LLVM's intrinsic will compile to an optimized + // `f32.trunc` native instruction, so we can leverage this for both code size + // and speed. + llvm_intrinsically_optimized! { + #[cfg(target_arch = "wasm32")] { + return unsafe { ::core::intrinsics::truncf32(x) } + } + } let x1p120 = f32::from_bits(0x7b800000); // 0x1p120f === 2 ^ 120 let mut i: u32 = x.to_bits(); diff --git a/src/libcompiler_builtins/libm/test-generator/README.md b/src/libcompiler_builtins/libm/test-generator/README.md deleted file mode 100644 index cbacd88f14..0000000000 --- a/src/libcompiler_builtins/libm/test-generator/README.md +++ /dev/null @@ -1,8 +0,0 @@ -# `test-generator` - -This is a tool to generate test cases for the `libm` crate. 
- -The generator randomly creates inputs for each math function, then proceeds to compute the -expected output for the given function by running the MUSL *C implementation* of the function and -finally it packs the test cases as a Cargo test file. For this reason, this generator **must** -always be compiled for the `x86_64-unknown-linux-musl` target. diff --git a/src/libcompiler_builtins/libm/test-generator/src/main.rs b/src/libcompiler_builtins/libm/test-generator/src/main.rs deleted file mode 100644 index 4c4e420a28..0000000000 --- a/src/libcompiler_builtins/libm/test-generator/src/main.rs +++ /dev/null @@ -1,788 +0,0 @@ -// NOTE we intentionally avoid using the `quote` crate here because it doesn't work with the -// `x86_64-unknown-linux-musl` target. - -// NOTE usually the only thing you need to do to test a new math function is to add it to one of the -// macro invocations found in the bottom of this file. - -#[macro_use] -extern crate itertools; -extern crate rand; - -use std::error::Error; -use std::fmt::Write as _0; -use std::fs::{self, File}; -use std::io::Write as _1; -use std::{f32, f64, i16, u16, u32, u64, u8}; - -use rand::{Rng, SeedableRng, XorShiftRng}; - -// Number of test cases to generate -const NTESTS: usize = 10_000; - -// TODO tweak these functions to generate edge cases (zero, infinity, NaN) more often -fn f32(rng: &mut XorShiftRng) -> f32 { - let sign = if rng.gen_bool(0.5) { 1 << 31 } else { 0 }; - let exponent = (rng.gen_range(0, u8::MAX) as u32) << 23; - let mantissa = rng.gen_range(0, u32::MAX) & ((1 << 23) - 1); - - f32::from_bits(sign + exponent + mantissa) -} - -fn f64(rng: &mut XorShiftRng) -> f64 { - let sign = if rng.gen_bool(0.5) { 1 << 63 } else { 0 }; - let exponent = (rng.gen_range(0, u16::MAX) as u64 & ((1 << 11) - 1)) << 52; - let mantissa = rng.gen_range(0, u64::MAX) & ((1 << 52) - 1); - - f64::from_bits(sign + exponent + mantissa) -} - -const EDGE_CASES32: &[f32] = &[ - -0., - 0., - f32::EPSILON, - f32::INFINITY, - f32::MAX, - f32::MIN, - f32::MIN_POSITIVE, - f32::NAN, - f32::NEG_INFINITY, -]; - -const EDGE_CASES64: &[f64] = &[ - -0., - 0., - f64::EPSILON, - f64::INFINITY, - f64::MAX, - f64::MIN, - f64::MIN_POSITIVE, - f64::NAN, - f64::NEG_INFINITY, -]; - -// fn(f32) -> f32 -macro_rules! 
f32_f32 { - ($($intr:ident,)*) => { - fn f32_f32(rng: &mut XorShiftRng) -> Result<(), Box> { - // MUSL C implementation of the function to test - extern "C" { - $(fn $intr(_: f32) -> f32;)* - } - - $( - let mut cases = String::new(); - - // random inputs - for inp in EDGE_CASES32.iter().cloned().chain((0..NTESTS).map(|_| f32(rng))) { - let out = unsafe { $intr(inp) }; - - let inp = inp.to_bits(); - let out = out.to_bits(); - - write!(cases, "({}, {})", inp, out).unwrap(); - cases.push(','); - } - - let mut f = File::create(concat!("tests/", stringify!($intr), ".rs"))?; - write!(f, " - #![deny(warnings)] - - extern crate libm; - - use std::panic; - - #[test] - fn {0}() {{ - const CASES: &[(u32, u32)] = &[ - {1} - ]; - - for case in CASES {{ - let (inp, expected) = *case; - - if let Ok(outf) = - panic::catch_unwind(|| libm::{0}(f32::from_bits(inp))) - {{ - let outi = outf.to_bits(); - - if !((outf.is_nan() && f32::from_bits(expected).is_nan()) - || libm::_eqf(outi, expected)) - {{ - panic!( - \"input: {{}}, output: {{}}, expected: {{}}\", - inp, outi, expected, - ); - }} - }} else {{ - panic!( - \"input: {{}}, output: PANIC, expected: {{}}\", - inp, expected, - ); - }} - }} - }} -", - stringify!($intr), - cases)?; - )* - - Ok(()) - } - } -} - -// fn(f32, f32) -> f32 -macro_rules! f32f32_f32 { - ($($intr:ident,)*) => { - fn f32f32_f32(rng: &mut XorShiftRng) -> Result<(), Box> { - extern "C" { - $(fn $intr(_: f32, _: f32) -> f32;)* - } - - let mut rng2 = rng.clone(); - let mut rng3 = rng.clone(); - $( - let mut cases = String::new(); - for (i1, i2) in iproduct!( - EDGE_CASES32.iter().cloned(), - EDGE_CASES32.iter().cloned() - ).chain(EDGE_CASES32.iter().map(|i1| (*i1, f32(rng)))) - .chain(EDGE_CASES32.iter().map(|i2| (f32(&mut rng2), *i2))) - .chain((0..NTESTS).map(|_| (f32(&mut rng3), f32(&mut rng3)))) - { - let out = unsafe { $intr(i1, i2) }; - - let i1 = i1.to_bits(); - let i2 = i2.to_bits(); - let out = out.to_bits(); - - write!(cases, "(({}, {}), {})", i1, i2, out).unwrap(); - cases.push(','); - } - - let mut f = File::create(concat!("tests/", stringify!($intr), ".rs"))?; - write!(f, " - #![deny(warnings)] - - extern crate libm; - - use std::panic; - - #[test] - fn {0}() {{ - const CASES: &[((u32, u32), u32)] = &[ - {1} - ]; - - for case in CASES {{ - let ((i1, i2), expected) = *case; - - if let Ok(outf) = panic::catch_unwind(|| {{ - libm::{0}(f32::from_bits(i1), f32::from_bits(i2)) - }}) {{ - let outi = outf.to_bits(); - - if !((outf.is_nan() && f32::from_bits(expected).is_nan()) - || libm::_eqf(outi, expected)) - {{ - panic!( - \"input: {{:?}}, output: {{}}, expected: {{}}\", - (i1, i2), - outi, - expected, - ); - }} - }} else {{ - panic!( - \"input: {{:?}}, output: PANIC, expected: {{}}\", - (i1, i2), - expected, - ); - }} - }} - }} -", - stringify!($intr), - cases)?; - )* - - Ok(()) - } - }; -} - -// fn(f32, f32, f32) -> f32 -macro_rules! 
f32f32f32_f32 { - ($($intr:ident,)*) => { - fn f32f32f32_f32(rng: &mut XorShiftRng) -> Result<(), Box> { - extern "C" { - $(fn $intr(_: f32, _: f32, _: f32) -> f32;)* - } - - let mut rng2 = rng.clone(); - $( - let mut cases = String::new(); - for (i1, i2, i3) in iproduct!( - EDGE_CASES32.iter().cloned(), - EDGE_CASES32.iter().cloned(), - EDGE_CASES32.iter().cloned() - ).chain(EDGE_CASES32.iter().map(|i1| (*i1, f32(rng), f32(rng)))) - .chain((0..NTESTS).map(|_| (f32(&mut rng2), f32(&mut rng2), f32(&mut rng2)))) - { - let out = unsafe { $intr(i1, i2, i3) }; - - let i1 = i1.to_bits(); - let i2 = i2.to_bits(); - let i3 = i3.to_bits(); - let out = out.to_bits(); - - write!(cases, "(({}, {}, {}), {})", i1, i2, i3, out).unwrap(); - cases.push(','); - } - - let mut f = File::create(concat!("tests/", stringify!($intr), ".rs"))?; - write!(f, " - #![deny(warnings)] - - extern crate libm; - - use std::panic; - - #[test] - fn {0}() {{ - const CASES: &[((u32, u32, u32), u32)] = &[ - {1} - ]; - - for case in CASES {{ - let ((i1, i2, i3), expected) = *case; - - if let Ok(outf) = panic::catch_unwind(|| {{ - libm::{0}( - f32::from_bits(i1), - f32::from_bits(i2), - f32::from_bits(i3), - ) - }}) {{ - let outi = outf.to_bits(); - - if !((outf.is_nan() && f32::from_bits(expected).is_nan()) - || libm::_eqf(outi, expected)) - {{ - panic!( - \"input: {{:?}}, output: {{}}, expected: {{}}\", - (i1, i2, i3), - outi, - expected, - ); - }} - }} else {{ - panic!( - \"input: {{:?}}, output: PANIC, expected: {{}}\", - (i1, i2, i3), - expected, - ); - }} - }} - }} -", - stringify!($intr), - cases)?; - )* - - Ok(()) - } - }; -} - -// fn(f32, i32) -> f32 -macro_rules! f32i32_f32 { - ($($intr:ident,)*) => { - fn f32i32_f32(rng: &mut XorShiftRng) -> Result<(), Box> { - extern "C" { - $(fn $intr(_: f32, _: i32) -> f32;)* - } - - let mut rng2 = rng.clone(); - $( - let mut cases = String::new(); - for i1 in EDGE_CASES32.iter().cloned().chain((0..NTESTS).map(|_| f32(&mut rng2))) { - let i2 = rng.gen_range(i16::MIN, i16::MAX); - let out = unsafe { $intr(i1, i2 as i32) }; - - let i1 = i1.to_bits(); - let out = out.to_bits(); - - write!(cases, "(({}, {}), {})", i1, i2, out).unwrap(); - cases.push(','); - } - - let mut f = File::create(concat!("tests/", stringify!($intr), ".rs"))?; - write!(f, " - #![deny(warnings)] - - extern crate libm; - - use std::panic; - - #[test] - fn {0}() {{ - const CASES: &[((u32, i16), u32)] = &[ - {1} - ]; - - for case in CASES {{ - let ((i1, i2), expected) = *case; - - if let Ok(outf) = panic::catch_unwind(|| {{ - libm::{0}(f32::from_bits(i1), i2 as i32) - }}) {{ - let outi = outf.to_bits(); - - if !((outf.is_nan() && f32::from_bits(expected).is_nan()) - || libm::_eqf(outi, expected)) - {{ - panic!( - \"input: {{:?}}, output: {{}}, expected: {{}}\", - (i1, i2), - outi, - expected, - ); - }} - }} else {{ - panic!( - \"input: {{:?}}, output: PANIC, expected: {{}}\", - (i1, i2), - expected, - ); - }} - }} - }} -", - stringify!($intr), - cases)?; - )* - - Ok(()) - } - }; -} - -// fn(f64) -> f64 -macro_rules! 
f64_f64 { - ($($intr:ident,)*) => { - fn f64_f64(rng: &mut XorShiftRng) -> Result<(), Box> { - // MUSL C implementation of the function to test - extern "C" { - $(fn $intr(_: f64) -> f64;)* - } - - $( - let mut cases = String::new(); - for inp in EDGE_CASES64.iter().cloned().chain((0..NTESTS).map(|_| f64(rng))) { - let out = unsafe { $intr(inp) }; - - let inp = inp.to_bits(); - let out = out.to_bits(); - - write!(cases, "({}, {})", inp, out).unwrap(); - cases.push(','); - } - - let mut f = File::create(concat!("tests/", stringify!($intr), ".rs"))?; - write!(f, " - #![deny(warnings)] - - extern crate libm; - - use std::panic; - - #[test] - fn {0}() {{ - const CASES: &[(u64, u64)] = &[ - {1} - ]; - - for case in CASES {{ - let (inp, expected) = *case; - - if let Ok(outf) = panic::catch_unwind(|| {{ - libm::{0}(f64::from_bits(inp)) - }}) {{ - let outi = outf.to_bits(); - - if !((outf.is_nan() && f64::from_bits(expected).is_nan()) - || libm::_eq(outi, expected)) - {{ - panic!( - \"input: {{}}, output: {{}}, expected: {{}}\", - inp, - outi, - expected, - ); - }} - }} else {{ - panic!( - \"input: {{}}, output: PANIC, expected: {{}}\", - inp, - expected, - ); - }} - }} - }} -", - stringify!($intr), - cases)?; - )* - - Ok(()) - } - } -} - -// fn(f64, f64) -> f64 -macro_rules! f64f64_f64 { - ($($intr:ident,)*) => { - fn f64f64_f64(rng: &mut XorShiftRng) -> Result<(), Box> { - extern "C" { - $(fn $intr(_: f64, _: f64) -> f64;)* - } - - let mut rng2 = rng.clone(); - let mut rng3 = rng.clone(); - $( - let mut cases = String::new(); - for (i1, i2) in iproduct!( - EDGE_CASES64.iter().cloned(), - EDGE_CASES64.iter().cloned() - ).chain(EDGE_CASES64.iter().map(|i1| (*i1, f64(rng)))) - .chain(EDGE_CASES64.iter().map(|i2| (f64(&mut rng2), *i2))) - .chain((0..NTESTS).map(|_| (f64(&mut rng3), f64(&mut rng3)))) - { - let out = unsafe { $intr(i1, i2) }; - - let i1 = i1.to_bits(); - let i2 = i2.to_bits(); - let out = out.to_bits(); - - write!(cases, "(({}, {}), {})", i1, i2, out).unwrap(); - cases.push(','); - } - - let mut f = File::create(concat!("tests/", stringify!($intr), ".rs"))?; - write!(f, " - #![deny(warnings)] - - extern crate libm; - - use std::panic; - - #[test] - fn {0}() {{ - const CASES: &[((u64, u64), u64)] = &[ - {1} - ]; - - for case in CASES {{ - let ((i1, i2), expected) = *case; - - if let Ok(outf) = panic::catch_unwind(|| {{ - libm::{0}(f64::from_bits(i1), f64::from_bits(i2)) - }}) {{ - let outi = outf.to_bits(); - - if !((outf.is_nan() && f64::from_bits(expected).is_nan()) || - libm::_eq(outi, expected)) {{ - panic!( - \"input: {{:?}}, output: {{}}, expected: {{}}\", - (i1, i2), - outi, - expected, - ); - }} - }} else {{ - panic!( - \"input: {{:?}}, output: PANIC, expected: {{}}\", - (i1, i2), - expected, - ); - }} - }} - }} -", - stringify!($intr), - cases)?; - )* - - Ok(()) - } - }; -} - -// fn(f64, f64, f64) -> f64 -macro_rules! 
f64f64f64_f64 { - ($($intr:ident,)*) => { - fn f64f64f64_f64(rng: &mut XorShiftRng) -> Result<(), Box> { - extern "C" { - $(fn $intr(_: f64, _: f64, _: f64) -> f64;)* - } - - let mut rng2 = rng.clone(); - $( - let mut cases = String::new(); - for (i1, i2, i3) in iproduct!( - EDGE_CASES64.iter().cloned(), - EDGE_CASES64.iter().cloned(), - EDGE_CASES64.iter().cloned() - ).chain(EDGE_CASES64.iter().map(|i1| (*i1, f64(rng), f64(rng)))) - .chain((0..NTESTS).map(|_| (f64(&mut rng2), f64(&mut rng2), f64(&mut rng2)))) - { - let out = unsafe { $intr(i1, i2, i3) }; - - let i1 = i1.to_bits(); - let i2 = i2.to_bits(); - let i3 = i3.to_bits(); - let out = out.to_bits(); - - write!(cases, "(({}, {}, {}), {})", i1, i2, i3, out).unwrap(); - cases.push(','); - } - - let mut f = File::create(concat!("tests/", stringify!($intr), ".rs"))?; - write!(f, " - #![deny(warnings)] - - extern crate libm; - - use std::panic; - - #[test] - fn {0}() {{ - const CASES: &[((u64, u64, u64), u64)] = &[ - {1} - ]; - - for case in CASES {{ - let ((i1, i2, i3), expected) = *case; - - if let Ok(outf) = panic::catch_unwind(|| {{ - libm::{0}( - f64::from_bits(i1), - f64::from_bits(i2), - f64::from_bits(i3), - ) - }}) {{ - let outi = outf.to_bits(); - - if !((outf.is_nan() && f64::from_bits(expected).is_nan()) - || libm::_eq(outi, expected)) - {{ - panic!( - \"input: {{:?}}, output: {{}}, expected: {{}}\", - (i1, i2, i3), - outi, - expected, - ); - }} - }} else {{ - panic!( - \"input: {{:?}}, output: PANIC, expected: {{}}\", - (i1, i2, i3), - expected, - ); - }} - }} - }} -", - stringify!($intr), - cases)?; - )* - - Ok(()) - } - }; -} - -// fn(f64, i32) -> f64 -macro_rules! f64i32_f64 { - ($($intr:ident,)*) => { - fn f64i32_f64(rng: &mut XorShiftRng) -> Result<(), Box> { - extern "C" { - $(fn $intr(_: f64, _: i32) -> f64;)* - } - - let mut rng2 = rng.clone(); - $( - let mut cases = String::new(); - for i1 in EDGE_CASES64.iter().cloned().chain((0..NTESTS).map(|_| f64(&mut rng2))) { - let i2 = rng.gen_range(i16::MIN, i16::MAX); - let out = unsafe { $intr(i1, i2 as i32) }; - - let i1 = i1.to_bits(); - let out = out.to_bits(); - - write!(cases, "(({}, {}), {})", i1, i2, out).unwrap(); - cases.push(','); - } - - let mut f = File::create(concat!("tests/", stringify!($intr), ".rs"))?; - write!(f, " - #![deny(warnings)] - - extern crate libm; - - use std::panic; - - #[test] - fn {0}() {{ - const CASES: &[((u64, i16), u64)] = &[ - {1} - ]; - - for case in CASES {{ - let ((i1, i2), expected) = *case; - - if let Ok(outf) = panic::catch_unwind(|| {{ - libm::{0}(f64::from_bits(i1), i2 as i32) - }}) {{ - let outi = outf.to_bits(); - - if !((outf.is_nan() && f64::from_bits(expected).is_nan()) || - libm::_eq(outi, expected)) {{ - panic!( - \"input: {{:?}}, output: {{}}, expected: {{}}\", - (i1, i2), - outi, - expected, - ); - }} - }} else {{ - panic!( - \"input: {{:?}}, output: PANIC, expected: {{}}\", - (i1, i2), - expected, - ); - }} - }} - }} -", - stringify!($intr), - cases)?; - )* - - Ok(()) - } - }; -} - -fn main() -> Result<(), Box> { - fs::remove_dir_all("tests").ok(); - fs::create_dir("tests")?; - - let mut rng = XorShiftRng::from_rng(&mut rand::thread_rng())?; - - f32_f32(&mut rng)?; - f32f32_f32(&mut rng)?; - f32f32f32_f32(&mut rng)?; - f32i32_f32(&mut rng)?; - f64_f64(&mut rng)?; - f64f64_f64(&mut rng)?; - f64f64f64_f64(&mut rng)?; - f64i32_f64(&mut rng)?; - - Ok(()) -} - -/* Functions to test */ - -// With signature `fn(f32) -> f32` -f32_f32! 
{ - acosf, - floorf, - truncf, - asinf, - atanf, - cbrtf, - cosf, - ceilf, - coshf, - exp2f, - expf, - expm1f, - log10f, - log1pf, - log2f, - logf, - roundf, - sinf, - sinhf, - tanf, - tanhf, - fabsf, - sqrtf, -} - -// With signature `fn(f32, f32) -> f32` -f32f32_f32! { - atan2f, - fdimf, - hypotf, - fmodf, - powf, -} - -// With signature `fn(f32, f32, f32) -> f32` -f32f32f32_f32! { - fmaf, -} - -// With signature `fn(f32, i32) -> f32` -f32i32_f32! { - scalbnf, -} - -// With signature `fn(f64) -> f64` -f64_f64! { - acos, - asin, - atan, - cbrt, - ceil, - cos, - cosh, - exp, - exp2, - expm1, - floor, - log, - log10, - log1p, - log2, - round, - sin, - sinh, - sqrt, - tan, - tanh, - trunc, - fabs, -} - -// With signature `fn(f64, f64) -> f64` -f64f64_f64! { - atan2, - fdim, - fmod, - hypot, - pow, -} - -// With signature `fn(f64, f64, f64) -> f64` -f64f64f64_f64! { - fma, -} - -// With signature `fn(f64, i32) -> f64` -f64i32_f64! { - scalbn, -} diff --git a/src/libcompiler_builtins/src/arm.rs b/src/libcompiler_builtins/src/arm.rs index dbd6f87ca5..9e43aec7d3 100644 --- a/src/libcompiler_builtins/src/arm.rs +++ b/src/libcompiler_builtins/src/arm.rs @@ -4,7 +4,7 @@ use core::intrinsics; // calling convention which can't be implemented using a normal Rust function. // NOTE The only difference between the iOS and non-iOS versions of those functions is that the iOS // versions use 3 leading underscores in the names of called functions instead of 2. -#[cfg(not(target_os = "ios"))] +#[cfg(not(any(target_os = "ios", target_env = "msvc")))] #[naked] #[cfg_attr(not(feature = "mangled-names"), no_mangle)] pub unsafe fn __aeabi_uidivmod() { diff --git a/src/libcompiler_builtins/src/int/sdiv.rs b/src/libcompiler_builtins/src/int/sdiv.rs index 2de73b0eab..89bb51a47b 100644 --- a/src/libcompiler_builtins/src/int/sdiv.rs +++ b/src/libcompiler_builtins/src/int/sdiv.rs @@ -73,7 +73,8 @@ intrinsics! { } #[use_c_shim_if(all(target_arch = "arm", - not(target_os = "ios")), + not(target_os = "ios"), + not(target_env = "msvc")), not(thumbv6m))] pub extern "C" fn __modsi3(a: i32, b: i32) -> i32 { a.mod_(b) @@ -89,7 +90,7 @@ intrinsics! { a.mod_(b) } - #[use_c_shim_if(all(target_arch = "arm", + #[use_c_shim_if(all(target_arch = "arm", not(target_env = "msvc"), not(target_os = "ios"), not(thumbv6m)))] pub extern "C" fn __divmodsi4(a: i32, b: i32, rem: &mut i32) -> i32 { a.divmod(b, rem, |a, b| __divsi3(a, b)) diff --git a/src/libcompiler_builtins/src/int/udiv.rs b/src/libcompiler_builtins/src/int/udiv.rs index 4382460e78..a2572227f9 100644 --- a/src/libcompiler_builtins/src/int/udiv.rs +++ b/src/libcompiler_builtins/src/int/udiv.rs @@ -211,6 +211,7 @@ intrinsics! { #[use_c_shim_if(all(target_arch = "arm", not(target_os = "ios"), + not(target_env = "msvc"), not(thumbv6m)))] /// Returns `n % d` pub extern "C" fn __umodsi3(n: u32, d: u32) -> u32 { @@ -220,6 +221,7 @@ intrinsics! 
{ #[use_c_shim_if(all(target_arch = "arm", not(target_os = "ios"), + not(target_env = "msvc"), not(thumbv6m)))] /// Returns `n / d` and sets `*rem = n % d` pub extern "C" fn __udivmodsi4(n: u32, d: u32, rem: Option<&mut u32>) -> u32 { diff --git a/src/libcore/Cargo.toml b/src/libcore/Cargo.toml index 0b01cfc488..7fd61f07d5 100644 --- a/src/libcore/Cargo.toml +++ b/src/libcore/Cargo.toml @@ -21,3 +21,7 @@ path = "../libcore/benches/lib.rs" [dev-dependencies] rand = "0.5" + +[features] +# Make panics and failed asserts immediately abort without formatting any message +panic_immediate_abort = [] diff --git a/src/libcore/alloc.rs b/src/libcore/alloc.rs index 4efcaae59b..58639808fa 100644 --- a/src/libcore/alloc.rs +++ b/src/libcore/alloc.rs @@ -164,15 +164,13 @@ impl Layout { /// alignment. In other words, if `K` has size 16, `K.align_to(32)` /// will *still* have size 16. /// - /// # Panics - /// - /// Panics if the combination of `self.size()` and the given `align` - /// violates the conditions listed in + /// Returns an error if the combination of `self.size()` and the given + /// `align` violates the conditions listed in /// [`Layout::from_size_align`](#method.from_size_align). - #[unstable(feature = "allocator_api", issue = "32838")] + #[unstable(feature = "alloc_layout_extra", issue = "55724")] #[inline] - pub fn align_to(&self, align: usize) -> Self { - Layout::from_size_align(self.size(), cmp::max(self.align(), align)).unwrap() + pub fn align_to(&self, align: usize) -> Result { + Layout::from_size_align(self.size(), cmp::max(self.align(), align)) } /// Returns the amount of padding we must insert after `self` @@ -191,7 +189,7 @@ impl Layout { /// to be less than or equal to the alignment of the starting /// address for the whole allocated block of memory. One way to /// satisfy this constraint is to ensure `align <= self.align()`. - #[unstable(feature = "allocator_api", issue = "32838")] + #[unstable(feature = "alloc_layout_extra", issue = "55724")] #[inline] pub fn padding_needed_for(&self, align: usize) -> usize { let len = self.size(); @@ -220,6 +218,23 @@ impl Layout { len_rounded_up.wrapping_sub(len) } + /// Creates a layout by rounding the size of this layout up to a multiple + /// of the layout's alignment. + /// + /// Returns `Err` if the padded size would overflow. + /// + /// This is equivalent to adding the result of `padding_needed_for` + /// to the layout's current size. + #[unstable(feature = "alloc_layout_extra", issue = "55724")] + #[inline] + pub fn pad_to_align(&self) -> Result { + let pad = self.padding_needed_for(self.align()); + let new_size = self.size().checked_add(pad) + .ok_or(LayoutErr { private: () })?; + + Layout::from_size_align(new_size, self.align()) + } + /// Creates a layout describing the record for `n` instances of /// `self`, with a suitable amount of padding between each to /// ensure that each instance is given its requested size and @@ -228,7 +243,7 @@ impl Layout { /// of each element in the array. /// /// On arithmetic overflow, returns `LayoutErr`. - #[unstable(feature = "allocator_api", issue = "32838")] + #[unstable(feature = "alloc_layout_extra", issue = "55724")] #[inline] pub fn repeat(&self, n: usize) -> Result<(Self, usize), LayoutErr> { let padded_size = self.size().checked_add(self.padding_needed_for(self.align())) @@ -248,13 +263,16 @@ impl Layout { /// will be properly aligned. Note that the result layout will /// satisfy the alignment properties of both `self` and `next`. 
/// + /// The resulting layout will be the same as that of a C struct containing + /// two fields with the layouts of `self` and `next`, in that order. + /// /// Returns `Some((k, offset))`, where `k` is layout of the concatenated /// record and `offset` is the relative location, in bytes, of the /// start of the `next` embedded within the concatenated record /// (assuming that the record itself starts at offset 0). /// /// On arithmetic overflow, returns `LayoutErr`. - #[unstable(feature = "allocator_api", issue = "32838")] + #[unstable(feature = "alloc_layout_extra", issue = "55724")] #[inline] pub fn extend(&self, next: Self) -> Result<(Self, usize), LayoutErr> { let new_align = cmp::max(self.align(), next.align()); @@ -281,7 +299,7 @@ impl Layout { /// aligned. /// /// On arithmetic overflow, returns `LayoutErr`. - #[unstable(feature = "allocator_api", issue = "32838")] + #[unstable(feature = "alloc_layout_extra", issue = "55724")] #[inline] pub fn repeat_packed(&self, n: usize) -> Result { let size = self.size().checked_mul(n).ok_or(LayoutErr { private: () })?; @@ -293,29 +311,20 @@ impl Layout { /// padding is inserted, the alignment of `next` is irrelevant, /// and is not incorporated *at all* into the resulting layout. /// - /// Returns `(k, offset)`, where `k` is layout of the concatenated - /// record and `offset` is the relative location, in bytes, of the - /// start of the `next` embedded within the concatenated record - /// (assuming that the record itself starts at offset 0). - /// - /// (The `offset` is always the same as `self.size()`; we use this - /// signature out of convenience in matching the signature of - /// `extend`.) - /// /// On arithmetic overflow, returns `LayoutErr`. - #[unstable(feature = "allocator_api", issue = "32838")] + #[unstable(feature = "alloc_layout_extra", issue = "55724")] #[inline] - pub fn extend_packed(&self, next: Self) -> Result<(Self, usize), LayoutErr> { + pub fn extend_packed(&self, next: Self) -> Result { let new_size = self.size().checked_add(next.size()) .ok_or(LayoutErr { private: () })?; let layout = Layout::from_size_align(new_size, self.align())?; - Ok((layout, self.size())) + Ok(layout) } /// Creates a layout describing the record for a `[T; n]`. /// /// On arithmetic overflow, returns `LayoutErr`. - #[unstable(feature = "allocator_api", issue = "32838")] + #[unstable(feature = "alloc_layout_extra", issue = "55724")] #[inline] pub fn array(n: usize) -> Result { Layout::new::() @@ -514,7 +523,7 @@ pub unsafe trait GlobalAlloc { ptr } - /// Shink or grow a block of memory to the given `new_size`. + /// Shrink or grow a block of memory to the given `new_size`. /// The block is described by the given `ptr` pointer and `layout`. /// /// If this returns a non-null pointer, then ownership of the memory block @@ -765,7 +774,7 @@ pub unsafe trait Alloc { // realloc. alloc_excess, realloc_excess /// Returns a pointer suitable for holding data described by - /// a new layout with `layout`’s alginment and a size given + /// a new layout with `layout`’s alignment and a size given /// by `new_size`. To /// accomplish this, this may extend or shrink the allocation /// referenced by `ptr` to fit the new layout. diff --git a/src/libcore/any.rs b/src/libcore/any.rs index 6b26093439..c2113dfd2a 100644 --- a/src/libcore/any.rs +++ b/src/libcore/any.rs @@ -39,7 +39,7 @@ //! //! // Logger function for any type that implements Debug. //! fn log(value: &T) { -//! let value_any = value as &Any; +//! let value_any = value as &dyn Any; //! //! 
// try to convert our value to a String. If successful, we want to //! // output the String's length as well as its value. If not, it's a @@ -95,7 +95,7 @@ pub trait Any: 'static { /// /// use std::any::{Any, TypeId}; /// - /// fn is_string(s: &Any) -> bool { + /// fn is_string(s: &dyn Any) -> bool { /// TypeId::of::() == s.get_type_id() /// } /// @@ -151,7 +151,7 @@ impl dyn Any { /// ``` /// use std::any::Any; /// - /// fn is_string(s: &Any) { + /// fn is_string(s: &dyn Any) { /// if s.is::() { /// println!("It's a string!"); /// } else { @@ -185,7 +185,7 @@ impl dyn Any { /// ``` /// use std::any::Any; /// - /// fn print_if_string(s: &Any) { + /// fn print_if_string(s: &dyn Any) { /// if let Some(string) = s.downcast_ref::() { /// println!("It's a string({}): '{}'", string.len(), string); /// } else { @@ -218,7 +218,7 @@ impl dyn Any { /// ``` /// use std::any::Any; /// - /// fn modify_if_u32(s: &mut Any) { + /// fn modify_if_u32(s: &mut dyn Any) { /// if let Some(num) = s.downcast_mut::() { /// *num = 42; /// } @@ -256,7 +256,7 @@ impl dyn Any+Send { /// ``` /// use std::any::Any; /// - /// fn is_string(s: &(Any + Send)) { + /// fn is_string(s: &(dyn Any + Send)) { /// if s.is::() { /// println!("It's a string!"); /// } else { @@ -282,7 +282,7 @@ impl dyn Any+Send { /// ``` /// use std::any::Any; /// - /// fn print_if_string(s: &(Any + Send)) { + /// fn print_if_string(s: &(dyn Any + Send)) { /// if let Some(string) = s.downcast_ref::() { /// println!("It's a string({}): '{}'", string.len(), string); /// } else { @@ -308,7 +308,7 @@ impl dyn Any+Send { /// ``` /// use std::any::Any; /// - /// fn modify_if_u32(s: &mut (Any + Send)) { + /// fn modify_if_u32(s: &mut (dyn Any + Send)) { /// if let Some(num) = s.downcast_mut::() { /// *num = 42; /// } @@ -340,7 +340,7 @@ impl dyn Any+Send+Sync { /// ``` /// use std::any::Any; /// - /// fn is_string(s: &(Any + Send + Sync)) { + /// fn is_string(s: &(dyn Any + Send + Sync)) { /// if s.is::() { /// println!("It's a string!"); /// } else { @@ -366,7 +366,7 @@ impl dyn Any+Send+Sync { /// ``` /// use std::any::Any; /// - /// fn print_if_string(s: &(Any + Send + Sync)) { + /// fn print_if_string(s: &(dyn Any + Send + Sync)) { /// if let Some(string) = s.downcast_ref::() { /// println!("It's a string({}): '{}'", string.len(), string); /// } else { @@ -392,7 +392,7 @@ impl dyn Any+Send+Sync { /// ``` /// use std::any::Any; /// - /// fn modify_if_u32(s: &mut (Any + Send + Sync)) { + /// fn modify_if_u32(s: &mut (dyn Any + Send + Sync)) { /// if let Some(num) = s.downcast_mut::() { /// *num = 42; /// } diff --git a/src/libcore/array.rs b/src/libcore/array.rs index 3d24f8902b..26e7a79d35 100644 --- a/src/libcore/array.rs +++ b/src/libcore/array.rs @@ -148,6 +148,15 @@ macro_rules! array_impls { } } + #[unstable(feature = "try_from", issue = "33417")] + impl<'a, T> TryFrom<&'a [T]> for [T; $N] where T: Copy { + type Error = TryFromSliceError; + + fn try_from(slice: &[T]) -> Result<[T; $N], TryFromSliceError> { + <&Self>::try_from(slice).map(|r| *r) + } + } + #[unstable(feature = "try_from", issue = "33417")] impl<'a, T> TryFrom<&'a [T]> for &'a [T; $N] { type Error = TryFromSliceError; diff --git a/src/libcore/benches/char/methods.rs b/src/libcore/benches/char/methods.rs new file mode 100644 index 0000000000..faf820d871 --- /dev/null +++ b/src/libcore/benches/char/methods.rs @@ -0,0 +1,42 @@ +// Copyright 2017 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use test::Bencher; + +const CHARS: [char; 9] = ['0', 'x', '2', '5', 'A', 'f', '7', '8', '9']; +const RADIX: [u32; 5] = [2, 8, 10, 16, 32]; + +#[bench] +fn bench_to_digit_radix_2(b: &mut Bencher) { + b.iter(|| CHARS.iter().cycle().take(10_000).map(|c| c.to_digit(2)).min()) +} + +#[bench] +fn bench_to_digit_radix_10(b: &mut Bencher) { + b.iter(|| CHARS.iter().cycle().take(10_000).map(|c| c.to_digit(10)).min()) +} + +#[bench] +fn bench_to_digit_radix_16(b: &mut Bencher) { + b.iter(|| CHARS.iter().cycle().take(10_000).map(|c| c.to_digit(16)).min()) +} + +#[bench] +fn bench_to_digit_radix_36(b: &mut Bencher) { + b.iter(|| CHARS.iter().cycle().take(10_000).map(|c| c.to_digit(36)).min()) +} + +#[bench] +fn bench_to_digit_radix_var(b: &mut Bencher) { + b.iter(|| CHARS.iter().cycle() + .zip(RADIX.iter().cycle()) + .take(10_000) + .map(|(c, radix)| c.to_digit(*radix)).min()) +} diff --git a/src/test/ui/non_modrs_mods/modrs_mod/inner_modrs_mod/mod.rs b/src/libcore/benches/char/mod.rs similarity index 96% rename from src/test/ui/non_modrs_mods/modrs_mod/inner_modrs_mod/mod.rs rename to src/libcore/benches/char/mod.rs index 77cab97235..a656e82cb6 100644 --- a/src/test/ui/non_modrs_mods/modrs_mod/inner_modrs_mod/mod.rs +++ b/src/libcore/benches/char/mod.rs @@ -8,4 +8,4 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -pub mod innest; +mod methods; diff --git a/src/libcore/benches/lib.rs b/src/libcore/benches/lib.rs index ced77d7791..d44f1577d5 100644 --- a/src/libcore/benches/lib.rs +++ b/src/libcore/benches/lib.rs @@ -15,6 +15,7 @@ extern crate core; extern crate test; mod any; +mod char; mod hash; mod iter; mod num; diff --git a/src/libcore/benches/num/mod.rs b/src/libcore/benches/num/mod.rs index 55f0bdb57e..b57e167b05 100644 --- a/src/libcore/benches/num/mod.rs +++ b/src/libcore/benches/num/mod.rs @@ -10,3 +10,108 @@ mod flt2dec; mod dec2flt; + +use test::Bencher; +use std::str::FromStr; + +const ASCII_NUMBERS: [&str; 19] = [ + "0", + "1", + "2", + "43", + "765", + "76567", + "987245987", + "-4aa32", + "1786235", + "8723095", + "f##5s", + "83638730", + "-2345", + "562aa43", + "-1", + "-0", + "abc", + "xyz", + "c0ffee", +]; + +macro_rules! from_str_bench { + ($mac:ident, $t:ty) => ( + #[bench] + fn $mac(b: &mut Bencher) { + b.iter(|| { + ASCII_NUMBERS + .iter() + .cycle() + .take(5_000) + .filter_map(|s| <($t)>::from_str(s).ok()) + .max() + }) + } + ) +} + +macro_rules! 
from_str_radix_bench { + ($mac:ident, $t:ty, $radix:expr) => ( + #[bench] + fn $mac(b: &mut Bencher) { + b.iter(|| { + ASCII_NUMBERS + .iter() + .cycle() + .take(5_000) + .filter_map(|s| <($t)>::from_str_radix(s, $radix).ok()) + .max() + }) + } + ) +} + +from_str_bench!(bench_u8_from_str, u8); +from_str_radix_bench!(bench_u8_from_str_radix_2, u8, 2); +from_str_radix_bench!(bench_u8_from_str_radix_10, u8, 10); +from_str_radix_bench!(bench_u8_from_str_radix_16, u8, 16); +from_str_radix_bench!(bench_u8_from_str_radix_36, u8, 36); + +from_str_bench!(bench_u16_from_str, u16); +from_str_radix_bench!(bench_u16_from_str_radix_2, u16, 2); +from_str_radix_bench!(bench_u16_from_str_radix_10, u16, 10); +from_str_radix_bench!(bench_u16_from_str_radix_16, u16, 16); +from_str_radix_bench!(bench_u16_from_str_radix_36, u16, 36); + +from_str_bench!(bench_u32_from_str, u32); +from_str_radix_bench!(bench_u32_from_str_radix_2, u32, 2); +from_str_radix_bench!(bench_u32_from_str_radix_10, u32, 10); +from_str_radix_bench!(bench_u32_from_str_radix_16, u32, 16); +from_str_radix_bench!(bench_u32_from_str_radix_36, u32, 36); + +from_str_bench!(bench_u64_from_str, u64); +from_str_radix_bench!(bench_u64_from_str_radix_2, u64, 2); +from_str_radix_bench!(bench_u64_from_str_radix_10, u64, 10); +from_str_radix_bench!(bench_u64_from_str_radix_16, u64, 16); +from_str_radix_bench!(bench_u64_from_str_radix_36, u64, 36); + +from_str_bench!(bench_i8_from_str, i8); +from_str_radix_bench!(bench_i8_from_str_radix_2, i8, 2); +from_str_radix_bench!(bench_i8_from_str_radix_10, i8, 10); +from_str_radix_bench!(bench_i8_from_str_radix_16, i8, 16); +from_str_radix_bench!(bench_i8_from_str_radix_36, i8, 36); + +from_str_bench!(bench_i16_from_str, i16); +from_str_radix_bench!(bench_i16_from_str_radix_2, i16, 2); +from_str_radix_bench!(bench_i16_from_str_radix_10, i16, 10); +from_str_radix_bench!(bench_i16_from_str_radix_16, i16, 16); +from_str_radix_bench!(bench_i16_from_str_radix_36, i16, 36); + +from_str_bench!(bench_i32_from_str, i32); +from_str_radix_bench!(bench_i32_from_str_radix_2, i32, 2); +from_str_radix_bench!(bench_i32_from_str_radix_10, i32, 10); +from_str_radix_bench!(bench_i32_from_str_radix_16, i32, 16); +from_str_radix_bench!(bench_i32_from_str_radix_36, i32, 36); + +from_str_bench!(bench_i64_from_str, i64); +from_str_radix_bench!(bench_i64_from_str_radix_2, i64, 2); +from_str_radix_bench!(bench_i64_from_str_radix_10, i64, 10); +from_str_radix_bench!(bench_i64_from_str_radix_16, i64, 16); +from_str_radix_bench!(bench_i64_from_str_radix_36, i64, 36); diff --git a/src/libcore/cell.rs b/src/libcore/cell.rs index ec7d366c3f..d8d51f5337 100644 --- a/src/libcore/cell.rs +++ b/src/libcore/cell.rs @@ -207,8 +207,8 @@ use ptr; /// /// # Examples /// -/// Here you can see how using `Cell` allows to use mutable field inside -/// immutable struct (which is also called 'interior mutability'). +/// In this example, you can see that `Cell` enables mutation inside an +/// immutable struct. In other words, it enables "interior mutability". 
/// /// ``` /// use std::cell::Cell; @@ -225,10 +225,11 @@ use ptr; /// /// let new_value = 100; /// -/// // ERROR, because my_struct is immutable +/// // ERROR: `my_struct` is immutable /// // my_struct.regular_field = new_value; /// -/// // WORKS, although `my_struct` is immutable, field `special_field` is mutable because it is Cell +/// // WORKS: although `my_struct` is immutable, `special_field` is a `Cell`, +/// // which can always be mutated /// my_struct.special_field.set(new_value); /// assert_eq!(my_struct.special_field.get(), new_value); /// ``` @@ -473,7 +474,7 @@ impl Cell { /// ``` #[inline] #[stable(feature = "cell_as_ptr", since = "1.12.0")] - pub fn as_ptr(&self) -> *mut T { + pub const fn as_ptr(&self) -> *mut T { self.value.get() } @@ -1507,8 +1508,10 @@ impl UnsafeCell { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] - pub fn get(&self) -> *mut T { - &self.value as *const T as *mut T + pub const fn get(&self) -> *mut T { + // We can just cast the pointer from `UnsafeCell` to `T` because of + // #[repr(transparent)] + self as *const UnsafeCell as *const T as *mut T } } diff --git a/src/libcore/char/methods.rs b/src/libcore/char/methods.rs index 64a17786b0..d6fcff644a 100644 --- a/src/libcore/char/methods.rs +++ b/src/libcore/char/methods.rs @@ -121,15 +121,24 @@ impl char { #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn to_digit(self, radix: u32) -> Option { - if radix > 36 { - panic!("to_digit: radix is too high (maximum 36)"); - } - let val = match self { - '0' ..= '9' => self as u32 - '0' as u32, - 'a' ..= 'z' => self as u32 - 'a' as u32 + 10, - 'A' ..= 'Z' => self as u32 - 'A' as u32 + 10, - _ => return None, + assert!(radix <= 36, "to_digit: radix is too high (maximum 36)"); + + // the code is split up here to improve execution speed for cases where + // the `radix` is constant and 10 or smaller + let val = if radix <= 10 { + match self { + '0' ..= '9' => self as u32 - '0' as u32, + _ => return None, + } + } else { + match self { + '0'..='9' => self as u32 - '0' as u32, + 'a'..='z' => self as u32 - 'a' as u32 + 10, + 'A'..='Z' => self as u32 - 'A' as u32 + 10, + _ => return None, + } }; + if val < radix { Some(val) } else { None } } @@ -903,7 +912,7 @@ impl char { /// ``` #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")] #[inline] - pub fn is_ascii(&self) -> bool { + pub const fn is_ascii(&self) -> bool { *self as u32 <= 0x7F } diff --git a/src/libcore/convert.rs b/src/libcore/convert.rs index b900990d0a..dbc28ef7cf 100644 --- a/src/libcore/convert.rs +++ b/src/libcore/convert.rs @@ -104,7 +104,6 @@ /// assert_eq!(vec![1, 3], filtered); /// ``` #[unstable(feature = "convert_id", issue = "53500")] -#[rustc_const_unstable(feature = "const_convert_id")] #[inline] pub const fn identity(x: T) -> T { x } diff --git a/src/libcore/default.rs b/src/libcore/default.rs index ab36e29b1e..638acebd61 100644 --- a/src/libcore/default.rs +++ b/src/libcore/default.rs @@ -76,7 +76,7 @@ /// } /// /// impl Default for Kind { -/// fn default() -> Kind { Kind::A } +/// fn default() -> Self { Kind::A } /// } /// ``` /// @@ -118,7 +118,7 @@ pub trait Default: Sized { /// } /// /// impl Default for Kind { - /// fn default() -> Kind { Kind::A } + /// fn default() -> Self { Kind::A } /// } /// ``` #[stable(feature = "rust1", since = "1.0.0")] diff --git a/src/libcore/ffi.rs b/src/libcore/ffi.rs index a03756f9c2..edeb3b0d36 100644 --- a/src/libcore/ffi.rs +++ b/src/libcore/ffi.rs @@ -1,6 +1,7 @@ #![stable(feature = "", since = "1.30.0")] 
#![allow(non_camel_case_types)] +#![cfg_attr(stage0, allow(dead_code))] //! Utilities related to FFI bindings. @@ -40,3 +41,187 @@ impl fmt::Debug for c_void { f.pad("c_void") } } + +/// Basic implementation of a `va_list`. +#[cfg(any(all(not(target_arch = "aarch64"), not(target_arch = "powerpc"), + not(target_arch = "x86_64")), + windows))] +#[unstable(feature = "c_variadic", + reason = "the `c_variadic` feature has not been properly tested on \ + all supported platforms", + issue = "27745")] +extern { + type VaListImpl; +} + +#[cfg(any(all(not(target_arch = "aarch64"), not(target_arch = "powerpc"), + not(target_arch = "x86_64")), + windows))] +impl fmt::Debug for VaListImpl { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "va_list* {:p}", self) + } +} + +/// AArch64 ABI implementation of a `va_list`. See the +/// [Aarch64 Procedure Call Standard] for more details. +/// +/// [AArch64 Procedure Call Standard]: +/// http://infocenter.arm.com/help/topic/com.arm.doc.ihi0055b/IHI0055B_aapcs64.pdf +#[cfg(all(target_arch = "aarch64", not(windows)))] +#[repr(C)] +#[derive(Debug)] +#[unstable(feature = "c_variadic", + reason = "the `c_variadic` feature has not been properly tested on \ + all supported platforms", + issue = "27745")] +struct VaListImpl { + stack: *mut (), + gr_top: *mut (), + vr_top: *mut (), + gr_offs: i32, + vr_offs: i32, +} + +/// PowerPC ABI implementation of a `va_list`. +#[cfg(all(target_arch = "powerpc", not(windows)))] +#[repr(C)] +#[derive(Debug)] +#[unstable(feature = "c_variadic", + reason = "the `c_variadic` feature has not been properly tested on \ + all supported platforms", + issue = "27745")] +struct VaListImpl { + gpr: u8, + fpr: u8, + reserved: u16, + overflow_arg_area: *mut (), + reg_save_area: *mut (), +} + +/// x86_64 ABI implementation of a `va_list`. +#[cfg(all(target_arch = "x86_64", not(windows)))] +#[repr(C)] +#[derive(Debug)] +#[unstable(feature = "c_variadic", + reason = "the `c_variadic` feature has not been properly tested on \ + all supported platforms", + issue = "27745")] +struct VaListImpl { + gp_offset: i32, + fp_offset: i32, + overflow_arg_area: *mut (), + reg_save_area: *mut (), +} + +/// A wrapper for a `va_list` +#[lang = "va_list"] +#[derive(Debug)] +#[unstable(feature = "c_variadic", + reason = "the `c_variadic` feature has not been properly tested on \ + all supported platforms", + issue = "27745")] +#[repr(transparent)] +#[cfg(not(stage0))] +pub struct VaList<'a>(&'a mut VaListImpl); + +// The VaArgSafe trait needs to be used in public interfaces, however, the trait +// itself must not be allowed to be used outside this module. Allowing users to +// implement the trait for a new type (thereby allowing the va_arg intrinsic to +// be used on a new type) is likely to cause undefined behavior. +// +// FIXME(dlrobertson): In order to use the VaArgSafe trait in a public interface +// but also ensure it cannot be used elsewhere, the trait needs to be public +// within a private module. Once RFC 2145 has been implemented look into +// improving this. +mod sealed_trait { + /// Trait which whitelists the allowed types to be used with [VaList::arg] + /// + /// [VaList::va_arg]: struct.VaList.html#method.arg + #[unstable(feature = "c_variadic", + reason = "the `c_variadic` feature has not been properly tested on \ + all supported platforms", + issue = "27745")] + pub trait VaArgSafe {} +} + +macro_rules! 
impl_va_arg_safe { + ($($t:ty),+) => { + $( + #[unstable(feature = "c_variadic", + reason = "the `c_variadic` feature has not been properly tested on \ + all supported platforms", + issue = "27745")] + impl sealed_trait::VaArgSafe for $t {} + )+ + } +} + +impl_va_arg_safe!{i8, i16, i32, i64, usize} +impl_va_arg_safe!{u8, u16, u32, u64, isize} +impl_va_arg_safe!{f64} + +#[unstable(feature = "c_variadic", + reason = "the `c_variadic` feature has not been properly tested on \ + all supported platforms", + issue = "27745")] +impl sealed_trait::VaArgSafe for *mut T {} +#[unstable(feature = "c_variadic", + reason = "the `c_variadic` feature has not been properly tested on \ + all supported platforms", + issue = "27745")] +impl sealed_trait::VaArgSafe for *const T {} + +#[cfg(not(stage0))] +impl<'a> VaList<'a> { + /// Advance to the next arg. + #[unstable(feature = "c_variadic", + reason = "the `c_variadic` feature has not been properly tested on \ + all supported platforms", + issue = "27745")] + pub unsafe fn arg(&mut self) -> T { + va_arg(self) + } + + /// Copy the `va_list` at the current location. + #[unstable(feature = "c_variadic", + reason = "the `c_variadic` feature has not been properly tested on \ + all supported platforms", + issue = "27745")] + pub unsafe fn copy(&mut self, f: F) -> R + where F: for<'copy> FnOnce(VaList<'copy>) -> R { + #[cfg(any(all(not(target_arch = "aarch64"), not(target_arch = "powerpc"), + not(target_arch = "x86_64")), + windows))] + let mut ap = va_copy(self); + #[cfg(all(any(target_arch = "aarch64", target_arch = "powerpc", target_arch = "x86_64"), + not(windows)))] + let mut ap_inner = va_copy(self); + #[cfg(all(any(target_arch = "aarch64", target_arch = "powerpc", target_arch = "x86_64"), + not(windows)))] + let mut ap = VaList(&mut ap_inner); + let ret = f(VaList(ap.0)); + va_end(&mut ap); + ret + } +} + +#[cfg(not(stage0))] +extern "rust-intrinsic" { + /// Destroy the arglist `ap` after initialization with `va_start` or + /// `va_copy`. + fn va_end(ap: &mut VaList); + + /// Copy the current location of arglist `src` to the arglist `dst`. + #[cfg(any(all(not(target_arch = "aarch64"), not(target_arch = "powerpc"), + not(target_arch = "x86_64")), + windows))] + fn va_copy<'a>(src: &VaList<'a>) -> VaList<'a>; + #[cfg(all(any(target_arch = "aarch64", target_arch = "powerpc", target_arch = "x86_64"), + not(windows)))] + fn va_copy(src: &VaList) -> VaListImpl; + + /// Loads an argument of type `T` from the `va_list` `ap` and increment the + /// argument `ap` points to. + fn va_arg(ap: &mut VaList) -> T; +} diff --git a/src/libcore/fmt/float.rs b/src/libcore/fmt/float.rs index 03e7a9a49d..3717a783f2 100644 --- a/src/libcore/fmt/float.rs +++ b/src/libcore/fmt/float.rs @@ -9,7 +9,7 @@ // except according to those terms. use fmt::{Formatter, Result, LowerExp, UpperExp, Display, Debug}; -use mem; +use mem::MaybeUninit; use num::flt2dec; // Don't inline this so callers don't use the stack space this function @@ -20,11 +20,14 @@ fn float_to_decimal_common_exact(fmt: &mut Formatter, num: &T, where T: flt2dec::DecodableFloat { unsafe { - let mut buf: [u8; 1024] = mem::uninitialized(); // enough for f32 and f64 - let mut parts: [flt2dec::Part; 4] = mem::uninitialized(); + let mut buf = MaybeUninit::<[u8; 1024]>::uninitialized(); // enough for f32 and f64 + let mut parts = MaybeUninit::<[flt2dec::Part; 4]>::uninitialized(); + // FIXME(#53491): Technically, this is calling `get_mut` on an uninitialized + // `MaybeUninit` (here and elsewhere in this file). 
Revisit this once + // we decided whether that is valid or not. let formatted = flt2dec::to_exact_fixed_str(flt2dec::strategy::grisu::format_exact, *num, sign, precision, - false, &mut buf, &mut parts); + false, buf.get_mut(), parts.get_mut()); fmt.pad_formatted_parts(&formatted) } } @@ -38,10 +41,11 @@ fn float_to_decimal_common_shortest(fmt: &mut Formatter, num: &T, { unsafe { // enough for f32 and f64 - let mut buf: [u8; flt2dec::MAX_SIG_DIGITS] = mem::uninitialized(); - let mut parts: [flt2dec::Part; 4] = mem::uninitialized(); + let mut buf = MaybeUninit::<[u8; flt2dec::MAX_SIG_DIGITS]>::uninitialized(); + let mut parts = MaybeUninit::<[flt2dec::Part; 4]>::uninitialized(); let formatted = flt2dec::to_shortest_str(flt2dec::strategy::grisu::format_shortest, *num, - sign, precision, false, &mut buf, &mut parts); + sign, precision, false, buf.get_mut(), + parts.get_mut()); fmt.pad_formatted_parts(&formatted) } } @@ -75,11 +79,11 @@ fn float_to_exponential_common_exact(fmt: &mut Formatter, num: &T, where T: flt2dec::DecodableFloat { unsafe { - let mut buf: [u8; 1024] = mem::uninitialized(); // enough for f32 and f64 - let mut parts: [flt2dec::Part; 6] = mem::uninitialized(); + let mut buf = MaybeUninit::<[u8; 1024]>::uninitialized(); // enough for f32 and f64 + let mut parts = MaybeUninit::<[flt2dec::Part; 6]>::uninitialized(); let formatted = flt2dec::to_exact_exp_str(flt2dec::strategy::grisu::format_exact, *num, sign, precision, - upper, &mut buf, &mut parts); + upper, buf.get_mut(), parts.get_mut()); fmt.pad_formatted_parts(&formatted) } } @@ -94,11 +98,11 @@ fn float_to_exponential_common_shortest(fmt: &mut Formatter, { unsafe { // enough for f32 and f64 - let mut buf: [u8; flt2dec::MAX_SIG_DIGITS] = mem::uninitialized(); - let mut parts: [flt2dec::Part; 6] = mem::uninitialized(); + let mut buf = MaybeUninit::<[u8; flt2dec::MAX_SIG_DIGITS]>::uninitialized(); + let mut parts = MaybeUninit::<[flt2dec::Part; 6]>::uninitialized(); let formatted = flt2dec::to_shortest_exp_str(flt2dec::strategy::grisu::format_shortest, *num, sign, (0, 0), upper, - &mut buf, &mut parts); + buf.get_mut(), parts.get_mut()); fmt.pad_formatted_parts(&formatted) } } diff --git a/src/libcore/future/future.rs b/src/libcore/future/future.rs index 9176e0d32c..0c870f9e40 100644 --- a/src/libcore/future/future.rs +++ b/src/libcore/future/future.rs @@ -17,7 +17,7 @@ use ops; use pin::Pin; use task::{Poll, LocalWaker}; -/// A future represents an asychronous computation. +/// A future represents an asynchronous computation. /// /// A future is a value that may not have finished computing yet. This kind of /// "asynchronous value" makes it possible for a thread to continue doing useful diff --git a/src/libcore/intrinsics.rs b/src/libcore/intrinsics.rs index cceae9249e..16f0299c18 100644 --- a/src/libcore/intrinsics.rs +++ b/src/libcore/intrinsics.rs @@ -672,6 +672,9 @@ extern "rust-intrinsic" { /// /// More specifically, this is the offset in bytes between successive /// items of the same type, including alignment padding. + /// + /// The stabilized version of this intrinsic is + /// [`std::mem::size_of`](../../std/mem/fn.size_of.html). pub fn size_of() -> usize; /// Moves a value to an uninitialized memory location. @@ -714,6 +717,10 @@ extern "rust-intrinsic" { /// initialize memory previous set to the result of `uninit`. pub fn uninit() -> T; + /// Moves a value out of scope without running drop glue. + #[cfg(not(stage0))] + pub fn forget(_: T); + /// Reinterprets the bits of a value of one type as another type. 
/// /// Both types must have the same size. Neither the original, nor the result, @@ -1465,6 +1472,20 @@ extern "rust-intrinsic" { /// y < 0 or y >= N, where N is the width of T in bits. pub fn unchecked_shr(x: T, y: T) -> T; + /// Performs rotate left. + /// The stabilized versions of this intrinsic are available on the integer + /// primitives via the `rotate_left` method. For example, + /// [`std::u32::rotate_left`](../../std/primitive.u32.html#method.rotate_left) + #[cfg(not(stage0))] + pub fn rotate_left(x: T, y: T) -> T; + + /// Performs rotate right. + /// The stabilized versions of this intrinsic are available on the integer + /// primitives via the `rotate_right` method. For example, + /// [`std::u32::rotate_right`](../../std/primitive.u32.html#method.rotate_right) + #[cfg(not(stage0))] + pub fn rotate_right(x: T, y: T) -> T; + /// Returns (a + b) mod 2N, where N is the width of T in bits. /// The stabilized versions of this intrinsic are available on the integer /// primitives via the `wrapping_add` method. For example, diff --git a/src/libcore/iter/iterator.rs b/src/libcore/iter/iterator.rs index 1038a47d3f..3063cb1a7d 100644 --- a/src/libcore/iter/iterator.rs +++ b/src/libcore/iter/iterator.rs @@ -519,7 +519,7 @@ pub trait Iterator { /// element. /// /// `map()` transforms one iterator into another, by means of its argument: - /// something that implements `FnMut`. It produces a new iterator which + /// something that implements [`FnMut`]. It produces a new iterator which /// calls this closure on each element of the original iterator. /// /// If you are good at thinking in types, you can think of `map()` like this: @@ -533,6 +533,7 @@ pub trait Iterator { /// more idiomatic to use [`for`] than `map()`. /// /// [`for`]: ../../book/ch03-05-control-flow.html#looping-through-a-collection-with-for + /// [`FnMut`]: ../../std/ops/trait.FnMut.html /// /// # Examples /// @@ -1857,7 +1858,7 @@ pub trait Iterator { /// ``` /// let a = ["lol", "NaN", "2", "5"]; /// - /// let mut first_number = a.iter().find_map(|s| s.parse().ok()); + /// let first_number = a.iter().find_map(|s| s.parse().ok()); /// /// assert_eq!(first_number, Some(2)); /// ``` diff --git a/src/libcore/iter/mod.rs b/src/libcore/iter/mod.rs index 509068843d..62e1f9fcb6 100644 --- a/src/libcore/iter/mod.rs +++ b/src/libcore/iter/mod.rs @@ -112,10 +112,10 @@ //! //! // next() is the only required method //! fn next(&mut self) -> Option { -//! // increment our count. This is why we started at zero. +//! // Increment our count. This is why we started at zero. //! self.count += 1; //! -//! // check to see if we've finished counting or not. +//! // Check to see if we've finished counting or not. //! if self.count < 6 { //! Some(self.count) //! } else { @@ -339,6 +339,8 @@ pub use self::sources::{RepeatWith, repeat_with}; pub use self::sources::{Empty, empty}; #[stable(feature = "iter_once", since = "1.2.0")] pub use self::sources::{Once, once}; +#[unstable(feature = "iter_unfold", issue = "55977")] +pub use self::sources::{Unfold, unfold, Successors, successors}; #[stable(feature = "rust1", since = "1.0.0")] pub use self::traits::{FromIterator, IntoIterator, DoubleEndedIterator, Extend}; diff --git a/src/libcore/iter/range.rs b/src/libcore/iter/range.rs index 55addd86bc..f0fd07b43c 100644 --- a/src/libcore/iter/range.rs +++ b/src/libcore/iter/range.rs @@ -166,14 +166,14 @@ macro_rules! 
step_impl_no_between { } step_impl_unsigned!(usize u8 u16); -#[cfg(not(target_pointer_witdth = "16"))] +#[cfg(not(target_pointer_width = "16"))] step_impl_unsigned!(u32); -#[cfg(target_pointer_witdth = "16")] +#[cfg(target_pointer_width = "16")] step_impl_no_between!(u32); step_impl_signed!([isize: usize] [i8: u8] [i16: u16]); -#[cfg(not(target_pointer_witdth = "16"))] +#[cfg(not(target_pointer_width = "16"))] step_impl_signed!([i32: u32]); -#[cfg(target_pointer_witdth = "16")] +#[cfg(target_pointer_width = "16")] step_impl_no_between!(i32); #[cfg(target_pointer_width = "64")] step_impl_unsigned!(u64); diff --git a/src/libcore/iter/sources.rs b/src/libcore/iter/sources.rs index d500cc99fa..f6a4a7a6fa 100644 --- a/src/libcore/iter/sources.rs +++ b/src/libcore/iter/sources.rs @@ -283,7 +283,7 @@ impl Default for Empty { /// assert_eq!(None, nope.next()); /// ``` #[stable(feature = "iter_empty", since = "1.2.0")] -pub fn empty() -> Empty { +pub const fn empty() -> Empty { Empty(marker::PhantomData) } @@ -386,3 +386,164 @@ impl FusedIterator for Once {} pub fn once(value: T) -> Once { Once { inner: Some(value).into_iter() } } + +/// Creates a new iterator where each iteration calls the provided closure +/// `F: FnMut(&mut St) -> Option`. +/// +/// This allows creating a custom iterator with any behavior +/// without using the more verbose syntax of creating a dedicated type +/// and implementing the `Iterator` trait for it. +/// +/// In addition to its captures and environment, +/// the closure is given a mutable reference to some state +/// that is preserved across iterations. +/// That state starts as the given `initial_state` value. +/// +/// Note that the `Unfold` iterator doesn’t make assumptions about the behavior of the closure, +/// and therefore conservatively does not implement [`FusedIterator`], +/// or override [`Iterator::size_hint`] from its default `(0, None)`. +/// +/// [`FusedIterator`]: trait.FusedIterator.html +/// [`Iterator::size_hint`]: trait.Iterator.html#method.size_hint +/// +/// # Examples +/// +/// Let’s re-implement the counter iterator from [module-level documentation]: +/// +/// [module-level documentation]: index.html +/// +/// ``` +/// #![feature(iter_unfold)] +/// let counter = std::iter::unfold(0, |count| { +/// // Increment our count. This is why we started at zero. +/// *count += 1; +/// +/// // Check to see if we've finished counting or not. +/// if *count < 6 { +/// Some(*count) +/// } else { +/// None +/// } +/// }); +/// assert_eq!(counter.collect::>(), &[1, 2, 3, 4, 5]); +/// ``` +#[inline] +#[unstable(feature = "iter_unfold", issue = "55977")] +pub fn unfold(initial_state: St, f: F) -> Unfold + where F: FnMut(&mut St) -> Option +{ + Unfold { + state: initial_state, + f, + } +} + +/// An iterator where each iteration calls the provided closure `F: FnMut(&mut St) -> Option`. +/// +/// This `struct` is created by the [`unfold`] function. +/// See its documentation for more. 
+/// +/// [`unfold`]: fn.unfold.html +#[derive(Clone)] +#[unstable(feature = "iter_unfold", issue = "55977")] +pub struct Unfold { + state: St, + f: F, +} + +#[unstable(feature = "iter_unfold", issue = "55977")] +impl Iterator for Unfold + where F: FnMut(&mut St) -> Option +{ + type Item = T; + + #[inline] + fn next(&mut self) -> Option { + (self.f)(&mut self.state) + } +} + +#[unstable(feature = "iter_unfold", issue = "55977")] +impl fmt::Debug for Unfold { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Unfold") + .field("state", &self.state) + .finish() + } +} + +/// Creates a new iterator where each successive item is computed based on the preceding one. +/// +/// The iterator starts with the given first item (if any) +/// and calls the given `FnMut(&T) -> Option` closure to compute each item’s successor. +/// +/// ``` +/// #![feature(iter_unfold)] +/// use std::iter::successors; +/// +/// let powers_of_10 = successors(Some(1_u16), |n| n.checked_mul(10)); +/// assert_eq!(powers_of_10.collect::>(), &[1, 10, 100, 1_000, 10_000]); +/// ``` +#[unstable(feature = "iter_unfold", issue = "55977")] +pub fn successors(first: Option, succ: F) -> Successors + where F: FnMut(&T) -> Option +{ + // If this function returned `impl Iterator` + // it could be based on `unfold` and not need a dedicated type. + // However having a named `Successors` type allows it to be `Clone` when `T` and `F` are. + Successors { + next: first, + succ, + } +} + +/// An new iterator where each successive item is computed based on the preceding one. +/// +/// This `struct` is created by the [`successors`] function. +/// See its documentation for more. +/// +/// [`successors`]: fn.successors.html +#[derive(Clone)] +#[unstable(feature = "iter_unfold", issue = "55977")] +pub struct Successors { + next: Option, + succ: F, +} + +#[unstable(feature = "iter_unfold", issue = "55977")] +impl Iterator for Successors + where F: FnMut(&T) -> Option +{ + type Item = T; + + #[inline] + fn next(&mut self) -> Option { + self.next.take().map(|item| { + self.next = (self.succ)(&item); + item + }) + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + if self.next.is_some() { + (1, None) + } else { + (0, Some(0)) + } + } +} + +#[unstable(feature = "iter_unfold", issue = "55977")] +impl FusedIterator for Successors + where F: FnMut(&T) -> Option +{} + +#[unstable(feature = "iter_unfold", issue = "55977")] +impl fmt::Debug for Successors { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Successors") + .field("next", &self.next) + .finish() + } +} diff --git a/src/libcore/iter/traits.rs b/src/libcore/iter/traits.rs index f95f8e7dbc..d2c5a3bed2 100644 --- a/src/libcore/iter/traits.rs +++ b/src/libcore/iter/traits.rs @@ -960,7 +960,7 @@ impl Product> for Result /// /// Calling next on a fused iterator that has returned `None` once is guaranteed /// to return [`None`] again. This trait should be implemented by all iterators -/// that behave this way because it allows for some significant optimizations. +/// that behave this way because it allows optimizing [`Iterator::fuse`]. /// /// Note: In general, you should not use `FusedIterator` in generic bounds if /// you need a fused iterator. 
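Beyond the powers-of-ten example in the docs above, `successors` is a natural fit for any sequence defined by a step function; a sketch under the same `iter_unfold` feature gate (the `112` item count for the Collatz trajectory of 27 is a well-known value, but treat it as illustrative):

```rust
#![feature(iter_unfold)]
use std::iter::successors;

fn main() {
    // Each item is computed from its predecessor; the iterator ends at the
    // first `None`, i.e. once the sequence reaches 1.
    let collatz = successors(Some(27u64), |&n| match n {
        1 => None,
        n if n % 2 == 0 => Some(n / 2),
        n => Some(3 * n + 1),
    });
    assert_eq!(collatz.count(), 112);
}
```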
Instead, you should just call [`Iterator::fuse`] diff --git a/src/libcore/lib.rs b/src/libcore/lib.rs index 4156b1bec9..726e891df0 100644 --- a/src/libcore/lib.rs +++ b/src/libcore/lib.rs @@ -87,14 +87,12 @@ #![feature(doc_spotlight)] #![feature(extern_types)] #![feature(fundamental)] -#![cfg_attr(stage0, feature(impl_header_lifetime_elision))] #![feature(intrinsics)] #![feature(lang_items)] #![feature(link_llvm_intrinsics)] #![feature(never_type)] #![feature(nll)] #![feature(exhaustive_patterns)] -#![feature(macro_at_most_once_rep)] #![feature(no_core)] #![feature(on_unimplemented)] #![feature(optin_builtin_traits)] @@ -107,6 +105,7 @@ #![feature(staged_api)] #![feature(stmt_expr_attributes)] #![feature(unboxed_closures)] +#![feature(unsized_locals)] #![feature(untagged_unions)] #![feature(unwind_attributes)] #![feature(doc_alias)] @@ -129,6 +128,7 @@ #![feature(const_transmute)] #![feature(reverse_bits)] #![feature(non_exhaustive)] +#![feature(structural_match)] #[prelude_import] #[allow(unused)] @@ -228,7 +228,7 @@ mod nonzero; mod tuple; mod unit; -// Pull in the the `coresimd` crate directly into libcore. This is where all the +// Pull in the `coresimd` crate directly into libcore. This is where all the // architecture-specific (and vendor-specific) intrinsics are defined. AKA // things like SIMD and such. Note that the actual source for all this lies in a // different repository, rust-lang-nursery/stdsimd. That's why the setup here is diff --git a/src/libcore/macros.rs b/src/libcore/macros.rs index a0c87f13e5..8b1855800c 100644 --- a/src/libcore/macros.rs +++ b/src/libcore/macros.rs @@ -278,14 +278,14 @@ macro_rules! debug_assert_ne { /// /// // The previous method of quick returning Errors /// fn write_to_file_using_try() -> Result<(), MyError> { -/// let mut file = try!(File::create("my_best_friends.txt")); -/// try!(file.write_all(b"This is a list of my best friends.")); +/// let mut file = r#try!(File::create("my_best_friends.txt")); +/// r#try!(file.write_all(b"This is a list of my best friends.")); /// Ok(()) /// } /// /// // This is equivalent to: /// fn write_to_file_using_match() -> Result<(), MyError> { -/// let mut file = try!(File::create("my_best_friends.txt")); +/// let mut file = r#try!(File::create("my_best_friends.txt")); /// match file.write_all(b"This is a list of my best friends.") { /// Ok(v) => v, /// Err(e) => return Err(From::from(e)), @@ -296,14 +296,14 @@ macro_rules! debug_assert_ne { #[macro_export] #[stable(feature = "rust1", since = "1.0.0")] #[doc(alias = "?")] -macro_rules! try { +macro_rules! r#try { ($expr:expr) => (match $expr { $crate::result::Result::Ok(val) => val, $crate::result::Result::Err(err) => { return $crate::result::Result::Err($crate::convert::From::from(err)) } }); - ($expr:expr,) => (try!($expr)); + ($expr:expr,) => (r#try!($expr)); } /// Write formatted data into a buffer. @@ -350,9 +350,8 @@ macro_rules! try { /// assert_eq!(v, b"s = \"abc 123\""); /// ``` /// -/// Note: This macro can be used in `no_std` setups as well -/// In a `no_std` setup you are responsible for the -/// implementation details of the components. +/// Note: This macro can be used in `no_std` setups as well. +/// In a `no_std` setup you are responsible for the implementation details of the components. /// /// ```no_run /// # extern crate core; @@ -440,7 +439,7 @@ macro_rules! writeln { /// /// If the determination that the code is unreachable proves incorrect, the /// program immediately terminates with a [`panic!`]. 
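Renaming the macro to `r#try!` keeps libcore compiling once `try` becomes a reserved keyword in the 2018 edition. Callers on that edition name it the same way; on current compilers the macro is deprecated in favor of `?`, so take this purely as a sketch of the raw-identifier spelling:

```rust
use std::num::ParseIntError;

fn double(s: &str) -> Result<i64, ParseIntError> {
    // Under the 2018 edition the macro must be named through a raw
    // identifier; the `?` operator does the same early return.
    let n: i64 = r#try!(s.parse());
    Ok(n * 2)
}

fn main() {
    assert_eq!(double("21"), Ok(42));
    assert!(double("twenty-one").is_err());
}
```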
The function [`unreachable_unchecked`], -/// which belongs to the [`std::hint`] module, informs the compilier to +/// which belongs to the [`std::hint`] module, informs the compiler to /// optimize the code out of the release version entirely. /// /// [`panic!`]: ../std/macro.panic.html diff --git a/src/libcore/marker.rs b/src/libcore/marker.rs index 662a8ddd96..3bcdfabbb2 100644 --- a/src/libcore/marker.rs +++ b/src/libcore/marker.rs @@ -578,6 +578,7 @@ macro_rules! impls{ /// /// [drop check]: ../../nomicon/dropck.html #[lang = "phantom_data"] +#[structural_match] #[stable(feature = "rust1", since = "1.0.0")] pub struct PhantomData; diff --git a/src/libcore/mem.rs b/src/libcore/mem.rs index 6de27d1b3a..e4b2800ae2 100644 --- a/src/libcore/mem.rs +++ b/src/libcore/mem.rs @@ -142,6 +142,19 @@ pub fn forget(t: T) { ManuallyDrop::new(t); } +/// Like [`forget`], but also accepts unsized values. +/// +/// This function is just a shim intended to be removed when the `unsized_locals` feature gets +/// stabilized. +/// +/// [`forget`]: fn.forget.html +#[inline] +#[cfg(not(stage0))] +#[unstable(feature = "forget_unsized", issue = "0")] +pub fn forget_unsized(t: T) { + unsafe { intrinsics::forget(t) } +} + /// Returns the size of a type in bytes. /// /// More specifically, this is the offset in bytes between successive elements @@ -201,7 +214,7 @@ pub fn forget(t: T) { /// /// ## Size of Enums /// -/// Enums that carry no data other than the descriminant have the same size as C enums +/// Enums that carry no data other than the discriminant have the same size as C enums /// on the platform they are compiled for. /// /// ## Size of Unions @@ -284,7 +297,7 @@ pub fn forget(t: T) { /// [alignment]: ./fn.align_of.html #[inline] #[stable(feature = "rust1", since = "1.0.0")] -#[cfg_attr(not(stage0), rustc_promotable)] +#[rustc_promotable] pub const fn size_of() -> usize { intrinsics::size_of::() } @@ -376,7 +389,7 @@ pub fn min_align_of_val(val: &T) -> usize { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] -#[cfg_attr(not(stage0), rustc_promotable)] +#[rustc_promotable] pub const fn align_of() -> usize { intrinsics::min_align_of::() } @@ -457,19 +470,10 @@ pub fn align_of_val(val: &T) -> usize { #[inline] #[stable(feature = "needs_drop", since = "1.21.0")] #[rustc_const_unstable(feature = "const_needs_drop")] -#[cfg(not(stage0))] pub const fn needs_drop() -> bool { intrinsics::needs_drop::() } -#[inline] -#[stable(feature = "needs_drop", since = "1.21.0")] -#[cfg(stage0)] -/// Ceci n'est pas la documentation -pub fn needs_drop() -> bool { - unsafe { intrinsics::needs_drop::() } -} - /// Creates a value whose bytes are all zero. /// /// This has the same effect as allocating space with @@ -813,7 +817,7 @@ pub fn drop(_x: T) { } #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub unsafe fn transmute_copy(src: &T) -> U { - ptr::read(src as *const T as *const U) + ptr::read_unaligned(src as *const T as *const U) } /// Opaque type representing the discriminant of an enum. @@ -946,8 +950,7 @@ impl ManuallyDrop { /// ManuallyDrop::new(Box::new(())); /// ``` #[stable(feature = "manually_drop", since = "1.20.0")] - #[rustc_const_unstable(feature = "const_manually_drop_new")] - #[inline] + #[inline(always)] pub const fn new(value: T) -> ManuallyDrop { ManuallyDrop { value } } @@ -964,10 +967,30 @@ impl ManuallyDrop { /// let _: Box<()> = ManuallyDrop::into_inner(x); // This drops the `Box`. 
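One motivation for routing `transmute_copy` through `ptr::read_unaligned`: the source reference only guarantees `T`'s alignment, which can be lower than `U`'s. A small stable-API illustration:

```rust
use std::mem;

fn main() {
    let bytes = [0x78u8, 0x56, 0x34, 0x12];
    // `[u8; 4]` is only 1-aligned, so reading it back as a `u32` must not
    // assume 4-byte alignment; that is exactly what `read_unaligned` avoids.
    let n: u32 = unsafe { mem::transmute_copy(&bytes) };
    assert_eq!(n, u32::from_ne_bytes(bytes));
}
```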
/// ``` #[stable(feature = "manually_drop", since = "1.20.0")] - #[inline] - pub fn into_inner(slot: ManuallyDrop) -> T { + #[inline(always)] + pub const fn into_inner(slot: ManuallyDrop) -> T { slot.value } + + /// Takes the contained value out. + /// + /// This method is primarily intended for moving out values in drop. + /// Instead of using [`ManuallyDrop::drop`] to manually drop the value, + /// you can use this method to take the value and use it however desired. + /// `Drop` will be invoked on the returned value following normal end-of-scope rules. + /// + /// If you have ownership of the container, you can use [`ManuallyDrop::into_inner`] instead. + /// + /// # Safety + /// + /// This function semantically moves out the contained value without preventing further usage. + /// It is up to the user of this method to ensure that this container is not used again. + #[must_use = "if you don't need the value, you can use `ManuallyDrop::drop` instead"] + #[unstable(feature = "manually_drop_take", issue = "55422")] + #[inline] + pub unsafe fn take(slot: &mut ManuallyDrop) -> T { + ManuallyDrop::into_inner(ptr::read(slot)) + } } impl ManuallyDrop { @@ -992,16 +1015,16 @@ impl ManuallyDrop { #[stable(feature = "manually_drop", since = "1.20.0")] impl Deref for ManuallyDrop { type Target = T; - #[inline] - fn deref(&self) -> &Self::Target { + #[inline(always)] + fn deref(&self) -> &T { &self.value } } #[stable(feature = "manually_drop", since = "1.20.0")] impl DerefMut for ManuallyDrop { - #[inline] - fn deref_mut(&mut self) -> &mut Self::Target { + #[inline(always)] + fn deref_mut(&mut self) -> &mut T { &mut self.value } } @@ -1016,11 +1039,22 @@ pub union MaybeUninit { } impl MaybeUninit { + /// Create a new `MaybeUninit` initialized with the given value. + /// + /// Note that dropping a `MaybeUninit` will never call `T`'s drop code. + /// It is your responsibility to make sure `T` gets dropped if it got initialized. + #[unstable(feature = "maybe_uninit", issue = "53491")] + #[inline(always)] + pub const fn new(val: T) -> MaybeUninit { + MaybeUninit { value: ManuallyDrop::new(val) } + } + /// Create a new `MaybeUninit` in an uninitialized state. /// /// Note that dropping a `MaybeUninit` will never call `T`'s drop code. /// It is your responsibility to make sure `T` gets dropped if it got initialized. #[unstable(feature = "maybe_uninit", issue = "53491")] + #[inline(always)] pub const fn uninitialized() -> MaybeUninit { MaybeUninit { uninit: () } } @@ -1034,6 +1068,7 @@ impl MaybeUninit { /// Note that dropping a `MaybeUninit` will never call `T`'s drop code. /// It is your responsibility to make sure `T` gets dropped if it got initialized. #[unstable(feature = "maybe_uninit", issue = "53491")] + #[inline] pub fn zeroed() -> MaybeUninit { let mut u = MaybeUninit::::uninitialized(); unsafe { @@ -1044,6 +1079,7 @@ impl MaybeUninit { /// Set the value of the `MaybeUninit`. This overwrites any previous value without dropping it. #[unstable(feature = "maybe_uninit", issue = "53491")] + #[inline(always)] pub fn set(&mut self, val: T) { unsafe { self.value = ManuallyDrop::new(val); @@ -1056,9 +1092,10 @@ impl MaybeUninit { /// /// # Unsafety /// - /// It is up to the caller to guarantee that the the `MaybeUninit` really is in an initialized + /// It is up to the caller to guarantee that the `MaybeUninit` really is in an initialized /// state, otherwise this will immediately cause undefined behavior. 
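A sketch of the drop-time pattern `ManuallyDrop::take` is aimed at; the method is still behind `manually_drop_take` in this patch, and the call is unsafe because the slot must never be used again afterwards:

```rust
use std::mem::ManuallyDrop;

struct Connection {
    // Wrapped so the value can be moved out in `drop` without a double free.
    addr: ManuallyDrop<String>,
}

impl Drop for Connection {
    fn drop(&mut self) {
        // Safety: `addr` is not touched again after this point.
        let addr = unsafe { ManuallyDrop::take(&mut self.addr) };
        println!("closing connection to {}", addr);
        // `addr` is dropped here by the normal end-of-scope rules.
    }
}

fn main() {
    let _conn = Connection { addr: ManuallyDrop::new(String::from("127.0.0.1:8080")) };
}
```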
#[unstable(feature = "maybe_uninit", issue = "53491")] + #[inline(always)] pub unsafe fn into_inner(self) -> T { ManuallyDrop::into_inner(self.value) } @@ -1067,9 +1104,10 @@ impl MaybeUninit { /// /// # Unsafety /// - /// It is up to the caller to guarantee that the the `MaybeUninit` really is in an initialized + /// It is up to the caller to guarantee that the `MaybeUninit` really is in an initialized /// state, otherwise this will immediately cause undefined behavior. #[unstable(feature = "maybe_uninit", issue = "53491")] + #[inline(always)] pub unsafe fn get_ref(&self) -> &T { &*self.value } @@ -1078,9 +1116,13 @@ impl MaybeUninit { /// /// # Unsafety /// - /// It is up to the caller to guarantee that the the `MaybeUninit` really is in an initialized + /// It is up to the caller to guarantee that the `MaybeUninit` really is in an initialized /// state, otherwise this will immediately cause undefined behavior. + // FIXME(#53491): We currently rely on the above being incorrect, i.e., we have references + // to uninitialized data (e.g. in `libcore/fmt/float.rs`). We should make + // a final decision about the rules before stabilization. #[unstable(feature = "maybe_uninit", issue = "53491")] + #[inline(always)] pub unsafe fn get_mut(&mut self) -> &mut T { &mut *self.value } @@ -1088,6 +1130,7 @@ impl MaybeUninit { /// Get a pointer to the contained value. Reading from this pointer will be undefined /// behavior unless the `MaybeUninit` is initialized. #[unstable(feature = "maybe_uninit", issue = "53491")] + #[inline(always)] pub fn as_ptr(&self) -> *const T { unsafe { &*self.value as *const T } } @@ -1095,6 +1138,7 @@ impl MaybeUninit { /// Get a mutable pointer to the contained value. Reading from this pointer will be undefined /// behavior unless the `MaybeUninit` is initialized. #[unstable(feature = "maybe_uninit", issue = "53491")] + #[inline(always)] pub fn as_mut_ptr(&mut self) -> *mut T { unsafe { &mut *self.value as *mut T } } diff --git a/src/libcore/nonzero.rs b/src/libcore/nonzero.rs index 118e75e1ee..436cd1fc05 100644 --- a/src/libcore/nonzero.rs +++ b/src/libcore/nonzero.rs @@ -10,7 +10,7 @@ //! Exposes the NonZero lang item which provides optimization hints. -use ops::CoerceUnsized; +use ops::{CoerceUnsized, DispatchFromDyn}; /// A wrapper type for raw pointers and integers that will never be /// NULL or 0 that might allow certain optimizations. @@ -20,3 +20,5 @@ use ops::CoerceUnsized; pub(crate) struct NonZero(pub(crate) T); impl, U> CoerceUnsized> for NonZero {} + +impl, U> DispatchFromDyn> for NonZero {} diff --git a/src/libcore/num/f32.rs b/src/libcore/num/f32.rs index 577c823f9a..d6c3996971 100644 --- a/src/libcore/num/f32.rs +++ b/src/libcore/num/f32.rs @@ -445,7 +445,7 @@ impl f32 { /// signaling NaNs on MIPS are quiet NaNs on x86, and vice-versa. /// /// Rather than trying to preserve signaling-ness cross-platform, this - /// implementation favours preserving the exact bits. This means that + /// implementation favors preserving the exact bits. This means that /// any payloads encoded in NaNs will be preserved even if the result of /// this method is sent over the network from an x86 machine to a MIPS one. /// diff --git a/src/libcore/num/flt2dec/estimator.rs b/src/libcore/num/flt2dec/estimator.rs index d42e05a91f..4e33fcfd76 100644 --- a/src/libcore/num/flt2dec/estimator.rs +++ b/src/libcore/num/flt2dec/estimator.rs @@ -22,4 +22,3 @@ pub fn estimate_scaling_factor(mant: u64, exp: i16) -> i16 { // therefore this always underestimates (or is exact), but not much. 
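The accessors gaining `#[inline(always)]` here split into two groups: `set`, `as_ptr` and `as_mut_ptr` are fine on an uninitialized slot, while `get_ref`, `get_mut` and `into_inner` are only sound after initialization. A minimal sketch against this unstable `maybe_uninit` API:

```rust
#![feature(maybe_uninit)]
use std::mem::MaybeUninit;

fn main() {
    let mut slot = MaybeUninit::<u64>::uninitialized();
    // Taking a raw pointer to the slot is always allowed...
    assert!(!slot.as_ptr().is_null());
    // ...but `get_ref` may only be called once a value is actually there.
    slot.set(7);
    let doubled = unsafe { *slot.get_ref() * 2 };
    assert_eq!(doubled, 14);
}
```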
(((nbits + exp as i64) * 1292913986) >> 32) as i16 } - diff --git a/src/libcore/num/flt2dec/mod.rs b/src/libcore/num/flt2dec/mod.rs index 21a2e72dac..d58015beec 100644 --- a/src/libcore/num/flt2dec/mod.rs +++ b/src/libcore/num/flt2dec/mod.rs @@ -658,4 +658,3 @@ pub fn to_exact_fixed_str<'a, T, F>(mut format_exact: F, v: T, } } } - diff --git a/src/libcore/num/mod.rs b/src/libcore/num/mod.rs index 311447c844..805be43132 100644 --- a/src/libcore/num/mod.rs +++ b/src/libcore/num/mod.rs @@ -216,7 +216,7 @@ $EndFeature, " ```"), #[stable(feature = "rust1", since = "1.0.0")] #[inline] - #[cfg_attr(not(stage0), rustc_promotable)] + #[rustc_promotable] pub const fn min_value() -> Self { !0 ^ ((!0 as $UnsignedT) >> 1) as Self } @@ -235,7 +235,7 @@ $EndFeature, " ```"), #[stable(feature = "rust1", since = "1.0.0")] #[inline] - #[cfg_attr(not(stage0), rustc_promotable)] + #[rustc_promotable] pub const fn max_value() -> Self { !Self::min_value() } @@ -1921,12 +1921,10 @@ big-endian (network) byte order. # Examples ``` -#![feature(int_to_from_bytes)] - let bytes = ", $swap_op, stringify!($SelfT), ".to_be_bytes(); assert_eq!(bytes, ", $be_bytes, "); ```"), - #[unstable(feature = "int_to_from_bytes", issue = "52963")] + #[stable(feature = "int_to_from_bytes", since = "1.32.0")] #[rustc_const_unstable(feature = "const_int_conversion")] #[inline] pub const fn to_be_bytes(self) -> [u8; mem::size_of::()] { @@ -1941,12 +1939,10 @@ little-endian byte order. # Examples ``` -#![feature(int_to_from_bytes)] - let bytes = ", $swap_op, stringify!($SelfT), ".to_le_bytes(); assert_eq!(bytes, ", $le_bytes, "); ```"), - #[unstable(feature = "int_to_from_bytes", issue = "52963")] + #[stable(feature = "int_to_from_bytes", since = "1.32.0")] #[rustc_const_unstable(feature = "const_int_conversion")] #[inline] pub const fn to_le_bytes(self) -> [u8; mem::size_of::()] { @@ -1969,8 +1965,6 @@ instead. # Examples ``` -#![feature(int_to_from_bytes)] - let bytes = ", $swap_op, stringify!($SelfT), ".to_ne_bytes(); assert_eq!(bytes, if cfg!(target_endian = \"big\") { ", $be_bytes, " @@ -1978,7 +1972,7 @@ assert_eq!(bytes, if cfg!(target_endian = \"big\") { ", $le_bytes, " }); ```"), - #[unstable(feature = "int_to_from_bytes", issue = "52963")] + #[stable(feature = "int_to_from_bytes", since = "1.32.0")] #[rustc_const_unstable(feature = "const_int_conversion")] #[inline] pub const fn to_ne_bytes(self) -> [u8; mem::size_of::()] { @@ -1993,12 +1987,23 @@ big endian. # Examples ``` -#![feature(int_to_from_bytes)] - let value = ", stringify!($SelfT), "::from_be_bytes(", $be_bytes, "); assert_eq!(value, ", $swap_op, "); +``` + +When starting from a slice rather than an array, fallible conversion APIs can be used: + +``` +#![feature(try_from)] +use std::convert::TryInto; + +fn read_be_", stringify!($SelfT), "(input: &mut &[u8]) -> ", stringify!($SelfT), " { + let (int_bytes, rest) = input.split_at(std::mem::size_of::<", stringify!($SelfT), ">()); + *input = rest; + ", stringify!($SelfT), "::from_be_bytes(int_bytes.try_into().unwrap()) +} ```"), - #[unstable(feature = "int_to_from_bytes", issue = "52963")] + #[stable(feature = "int_to_from_bytes", since = "1.32.0")] #[rustc_const_unstable(feature = "const_int_conversion")] #[inline] pub const fn from_be_bytes(bytes: [u8; mem::size_of::()]) -> Self { @@ -2014,12 +2019,23 @@ little endian. 
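The rotate methods now lower to the dedicated intrinsics outside stage0, while the old shift-and-or expression remains as the bootstrap fallback; both compute the same value, the intrinsic mainly lets the backend pick a single rotate instruction. A quick equivalence check:

```rust
fn rotate_left_by_shifts(x: u32, n: u32) -> u32 {
    // The stage0 fallback above: two shifts and an `or`, with the shift
    // amounts reduced modulo the bit width.
    const BITS: u32 = 32;
    (x << (n % BITS)) | (x >> ((BITS - (n % BITS)) % BITS))
}

fn main() {
    let n = 0x10000b3u32;
    assert_eq!(n.rotate_left(8), 0xb301);
    assert_eq!(n.rotate_left(8), rotate_left_by_shifts(n, 8));
}
```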
# Examples ``` -#![feature(int_to_from_bytes)] - let value = ", stringify!($SelfT), "::from_le_bytes(", $le_bytes, "); assert_eq!(value, ", $swap_op, "); +``` + +When starting from a slice rather than an array, fallible conversion APIs can be used: + +``` +#![feature(try_from)] +use std::convert::TryInto; + +fn read_be_", stringify!($SelfT), "(input: &mut &[u8]) -> ", stringify!($SelfT), " { + let (int_bytes, rest) = input.split_at(std::mem::size_of::<", stringify!($SelfT), ">()); + *input = rest; + ", stringify!($SelfT), "::from_be_bytes(int_bytes.try_into().unwrap()) +} ```"), - #[unstable(feature = "int_to_from_bytes", issue = "52963")] + #[stable(feature = "int_to_from_bytes", since = "1.32.0")] #[rustc_const_unstable(feature = "const_int_conversion")] #[inline] pub const fn from_le_bytes(bytes: [u8; mem::size_of::()]) -> Self { @@ -2041,16 +2057,27 @@ appropriate instead. # Examples ``` -#![feature(int_to_from_bytes)] - let value = ", stringify!($SelfT), "::from_ne_bytes(if cfg!(target_endian = \"big\") { ", $be_bytes, " } else { ", $le_bytes, " }); assert_eq!(value, ", $swap_op, "); +``` + +When starting from a slice rather than an array, fallible conversion APIs can be used: + +``` +#![feature(try_from)] +use std::convert::TryInto; + +fn read_be_", stringify!($SelfT), "(input: &mut &[u8]) -> ", stringify!($SelfT), " { + let (int_bytes, rest) = input.split_at(std::mem::size_of::<", stringify!($SelfT), ">()); + *input = rest; + ", stringify!($SelfT), "::from_be_bytes(int_bytes.try_into().unwrap()) +} ```"), - #[unstable(feature = "int_to_from_bytes", issue = "52963")] + #[stable(feature = "int_to_from_bytes", since = "1.32.0")] #[rustc_const_unstable(feature = "const_int_conversion")] #[inline] pub const fn from_ne_bytes(bytes: [u8; mem::size_of::()]) -> Self { @@ -2303,7 +2330,12 @@ assert_eq!(n.rotate_left(", $rot, "), m); #[rustc_const_unstable(feature = "const_int_rotate")] #[inline] pub const fn rotate_left(self, n: u32) -> Self { - (self << (n % $BITS)) | (self >> (($BITS - (n % $BITS)) % $BITS)) + #[cfg(not(stage0))] { + unsafe { intrinsics::rotate_left(self, n as $SelfT) } + } + #[cfg(stage0)] { + (self << (n % $BITS)) | (self >> (($BITS - (n % $BITS)) % $BITS)) + } } } @@ -2328,7 +2360,12 @@ assert_eq!(n.rotate_right(", $rot, "), m); #[rustc_const_unstable(feature = "const_int_rotate")] #[inline] pub const fn rotate_right(self, n: u32) -> Self { - (self >> (n % $BITS)) | (self << (($BITS - (n % $BITS)) % $BITS)) + #[cfg(not(stage0))] { + unsafe { intrinsics::rotate_right(self, n as $SelfT) } + } + #[cfg(stage0)] { + (self >> (n % $BITS)) | (self << (($BITS - (n % $BITS)) % $BITS)) + } } } @@ -3616,6 +3653,7 @@ assert_eq!(3", stringify!($SelfT), ".checked_next_power_of_two(), Some(4)); assert_eq!(", stringify!($SelfT), "::max_value().checked_next_power_of_two(), None);", $EndFeature, " ```"), + #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn checked_next_power_of_two(self) -> Option { self.one_less_than_next_power_of_two().checked_add(1) @@ -3653,12 +3691,10 @@ big-endian (network) byte order. # Examples ``` -#![feature(int_to_from_bytes)] - let bytes = ", $swap_op, stringify!($SelfT), ".to_be_bytes(); assert_eq!(bytes, ", $be_bytes, "); ```"), - #[unstable(feature = "int_to_from_bytes", issue = "52963")] + #[stable(feature = "int_to_from_bytes", since = "1.32.0")] #[rustc_const_unstable(feature = "const_int_conversion")] #[inline] pub const fn to_be_bytes(self) -> [u8; mem::size_of::()] { @@ -3673,12 +3709,10 @@ little-endian byte order. 
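Besides the slice-reading example added to the docs above, the newly stabilized conversions round-trip cleanly and make the endianness relationship explicit; a short check:

```rust
fn main() {
    let x: i64 = -0x0123_4567_89ab_cdef;
    let be = x.to_be_bytes();
    assert_eq!(i64::from_be_bytes(be), x);
    // Little-endian bytes are simply the big-endian bytes reversed.
    let mut le = be;
    le.reverse();
    assert_eq!(x.to_le_bytes(), le);
}
```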
# Examples ``` -#![feature(int_to_from_bytes)] - let bytes = ", $swap_op, stringify!($SelfT), ".to_le_bytes(); assert_eq!(bytes, ", $le_bytes, "); ```"), - #[unstable(feature = "int_to_from_bytes", issue = "52963")] + #[stable(feature = "int_to_from_bytes", since = "1.32.0")] #[rustc_const_unstable(feature = "const_int_conversion")] #[inline] pub const fn to_le_bytes(self) -> [u8; mem::size_of::()] { @@ -3701,8 +3735,6 @@ instead. # Examples ``` -#![feature(int_to_from_bytes)] - let bytes = ", $swap_op, stringify!($SelfT), ".to_ne_bytes(); assert_eq!(bytes, if cfg!(target_endian = \"big\") { ", $be_bytes, " @@ -3710,7 +3742,7 @@ assert_eq!(bytes, if cfg!(target_endian = \"big\") { ", $le_bytes, " }); ```"), - #[unstable(feature = "int_to_from_bytes", issue = "52963")] + #[stable(feature = "int_to_from_bytes", since = "1.32.0")] #[rustc_const_unstable(feature = "const_int_conversion")] #[inline] pub const fn to_ne_bytes(self) -> [u8; mem::size_of::()] { @@ -3725,12 +3757,23 @@ big endian. # Examples ``` -#![feature(int_to_from_bytes)] - let value = ", stringify!($SelfT), "::from_be_bytes(", $be_bytes, "); assert_eq!(value, ", $swap_op, "); +``` + +When starting from a slice rather than an array, fallible conversion APIs can be used: + +``` +#![feature(try_from)] +use std::convert::TryInto; + +fn read_be_", stringify!($SelfT), "(input: &mut &[u8]) -> ", stringify!($SelfT), " { + let (int_bytes, rest) = input.split_at(std::mem::size_of::<", stringify!($SelfT), ">()); + *input = rest; + ", stringify!($SelfT), "::from_be_bytes(int_bytes.try_into().unwrap()) +} ```"), - #[unstable(feature = "int_to_from_bytes", issue = "52963")] + #[stable(feature = "int_to_from_bytes", since = "1.32.0")] #[rustc_const_unstable(feature = "const_int_conversion")] #[inline] pub const fn from_be_bytes(bytes: [u8; mem::size_of::()]) -> Self { @@ -3746,12 +3789,23 @@ little endian. # Examples ``` -#![feature(int_to_from_bytes)] - let value = ", stringify!($SelfT), "::from_le_bytes(", $le_bytes, "); assert_eq!(value, ", $swap_op, "); +``` + +When starting from a slice rather than an array, fallible conversion APIs can be used: + +``` +#![feature(try_from)] +use std::convert::TryInto; + +fn read_be_", stringify!($SelfT), "(input: &mut &[u8]) -> ", stringify!($SelfT), " { + let (int_bytes, rest) = input.split_at(std::mem::size_of::<", stringify!($SelfT), ">()); + *input = rest; + ", stringify!($SelfT), "::from_be_bytes(int_bytes.try_into().unwrap()) +} ```"), - #[unstable(feature = "int_to_from_bytes", issue = "52963")] + #[stable(feature = "int_to_from_bytes", since = "1.32.0")] #[rustc_const_unstable(feature = "const_int_conversion")] #[inline] pub const fn from_le_bytes(bytes: [u8; mem::size_of::()]) -> Self { @@ -3773,16 +3827,27 @@ appropriate instead. 
# Examples ``` -#![feature(int_to_from_bytes)] - let value = ", stringify!($SelfT), "::from_ne_bytes(if cfg!(target_endian = \"big\") { ", $be_bytes, " } else { ", $le_bytes, " }); assert_eq!(value, ", $swap_op, "); +``` + +When starting from a slice rather than an array, fallible conversion APIs can be used: + +``` +#![feature(try_from)] +use std::convert::TryInto; + +fn read_be_", stringify!($SelfT), "(input: &mut &[u8]) -> ", stringify!($SelfT), " { + let (int_bytes, rest) = input.split_at(std::mem::size_of::<", stringify!($SelfT), ">()); + *input = rest; + ", stringify!($SelfT), "::from_be_bytes(int_bytes.try_into().unwrap()) +} ```"), - #[unstable(feature = "int_to_from_bytes", issue = "52963")] + #[stable(feature = "int_to_from_bytes", since = "1.32.0")] #[rustc_const_unstable(feature = "const_int_conversion")] #[inline] pub const fn from_ne_bytes(bytes: [u8; mem::size_of::()]) -> Self { @@ -4773,15 +4838,38 @@ pub struct ParseIntError { kind: IntErrorKind, } +/// Enum to store the various types of errors that can cause parsing an integer to fail. +#[unstable(feature = "int_error_matching", + reason = "it can be useful to match errors when making error messages \ + for integer parsing", + issue = "22639")] #[derive(Debug, Clone, PartialEq, Eq)] -enum IntErrorKind { +#[non_exhaustive] +pub enum IntErrorKind { + /// Value being parsed is empty. + /// + /// Among other causes, this variant will be constructed when parsing an empty string. Empty, + /// Contains an invalid digit. + /// + /// Among other causes, this variant will be constructed when parsing a string that + /// contains a letter. InvalidDigit, + /// Integer is too large to store in target integer type. Overflow, + /// Integer is too small to store in target integer type. Underflow, } impl ParseIntError { + /// Outputs the detailed cause of parsing an integer failing. 
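With `IntErrorKind` public and `kind()` exposed, callers can finally branch on why a parse failed. A sketch against the API exactly as added here (feature `int_error_matching`); the `std::num::IntErrorKind` path assumes the usual libcore-to-std re-export, and the catch-all arm is required because the enum is `#[non_exhaustive]`:

```rust
#![feature(int_error_matching)]
use std::num::IntErrorKind;

fn describe(input: &str) -> &'static str {
    match input.parse::<u8>() {
        Ok(_) => "a valid u8",
        Err(e) => match e.kind() {
            IntErrorKind::Empty => "empty input",
            IntErrorKind::InvalidDigit => "not a number",
            IntErrorKind::Overflow => "too large for u8",
            IntErrorKind::Underflow => "too small for u8",
            // `IntErrorKind` is #[non_exhaustive], so keep a fallback arm.
            _ => "some other parse failure",
        },
    }
}

fn main() {
    assert_eq!(describe(""), "empty input");
    assert_eq!(describe("300"), "too large for u8");
    assert_eq!(describe("12a"), "not a number");
}
```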
+ #[unstable(feature = "int_error_matching", + reason = "it can be useful to match errors when making error messages \ + for integer parsing", + issue = "22639")] + pub fn kind(&self) -> &IntErrorKind { + &self.kind + } #[unstable(feature = "int_error_internals", reason = "available through Error trait and this method should \ not be exposed publicly", diff --git a/src/libcore/num/wrapping.rs b/src/libcore/num/wrapping.rs index 1c826c2fa7..00134a58d3 100644 --- a/src/libcore/num/wrapping.rs +++ b/src/libcore/num/wrapping.rs @@ -387,7 +387,7 @@ assert_eq!(n.count_ones(), 3); ```"), #[inline] #[unstable(feature = "wrapping_int_impl", issue = "32463")] - pub fn count_ones(self) -> u32 { + pub const fn count_ones(self) -> u32 { self.0.count_ones() } } @@ -407,7 +407,7 @@ assert_eq!(Wrapping(!0", stringify!($t), ").count_zeros(), 0); ```"), #[inline] #[unstable(feature = "wrapping_int_impl", issue = "32463")] - pub fn count_zeros(self) -> u32 { + pub const fn count_zeros(self) -> u32 { self.0.count_zeros() } } @@ -430,7 +430,7 @@ assert_eq!(n.trailing_zeros(), 3); ```"), #[inline] #[unstable(feature = "wrapping_int_impl", issue = "32463")] - pub fn trailing_zeros(self) -> u32 { + pub const fn trailing_zeros(self) -> u32 { self.0.trailing_zeros() } } @@ -456,7 +456,7 @@ assert_eq!(n.trailing_zeros(), 3); /// ``` #[inline] #[unstable(feature = "wrapping_int_impl", issue = "32463")] - pub fn rotate_left(self, n: u32) -> Self { + pub const fn rotate_left(self, n: u32) -> Self { Wrapping(self.0.rotate_left(n)) } @@ -481,7 +481,7 @@ assert_eq!(n.trailing_zeros(), 3); /// ``` #[inline] #[unstable(feature = "wrapping_int_impl", issue = "32463")] - pub fn rotate_right(self, n: u32) -> Self { + pub const fn rotate_right(self, n: u32) -> Self { Wrapping(self.0.rotate_right(n)) } @@ -505,7 +505,7 @@ assert_eq!(n.trailing_zeros(), 3); /// ``` #[inline] #[unstable(feature = "wrapping_int_impl", issue = "32463")] - pub fn swap_bytes(self) -> Self { + pub const fn swap_bytes(self) -> Self { Wrapping(self.0.swap_bytes()) } @@ -532,7 +532,7 @@ assert_eq!(n.trailing_zeros(), 3); /// ``` #[unstable(feature = "reverse_bits", issue = "48763")] #[inline] - pub fn reverse_bits(self) -> Self { + pub const fn reverse_bits(self) -> Self { Wrapping(self.0.reverse_bits()) } @@ -560,7 +560,7 @@ if cfg!(target_endian = \"big\") { ```"), #[inline] #[unstable(feature = "wrapping_int_impl", issue = "32463")] - pub fn from_be(x: Self) -> Self { + pub const fn from_be(x: Self) -> Self { Wrapping(<$t>::from_be(x.0)) } } @@ -589,7 +589,7 @@ if cfg!(target_endian = \"little\") { ```"), #[inline] #[unstable(feature = "wrapping_int_impl", issue = "32463")] - pub fn from_le(x: Self) -> Self { + pub const fn from_le(x: Self) -> Self { Wrapping(<$t>::from_le(x.0)) } } @@ -618,7 +618,7 @@ if cfg!(target_endian = \"big\") { ```"), #[inline] #[unstable(feature = "wrapping_int_impl", issue = "32463")] - pub fn to_be(self) -> Self { + pub const fn to_be(self) -> Self { Wrapping(self.0.to_be()) } } @@ -647,7 +647,7 @@ if cfg!(target_endian = \"little\") { ```"), #[inline] #[unstable(feature = "wrapping_int_impl", issue = "32463")] - pub fn to_le(self) -> Self { + pub const fn to_le(self) -> Self { Wrapping(self.0.to_le()) } } @@ -707,7 +707,7 @@ assert_eq!(n.leading_zeros(), 3); ```"), #[inline] #[unstable(feature = "wrapping_int_impl", issue = "32463")] - pub fn leading_zeros(self) -> u32 { + pub const fn leading_zeros(self) -> u32 { self.0.leading_zeros() } } @@ -784,7 +784,7 @@ assert!(!Wrapping(-10", stringify!($t), ").is_positive()); 
```"), #[inline] #[unstable(feature = "wrapping_int_impl", issue = "32463")] - pub fn is_positive(self) -> bool { + pub const fn is_positive(self) -> bool { self.0.is_positive() } } @@ -806,7 +806,7 @@ assert!(!Wrapping(10", stringify!($t), ").is_negative()); ```"), #[inline] #[unstable(feature = "wrapping_int_impl", issue = "32463")] - pub fn is_negative(self) -> bool { + pub const fn is_negative(self) -> bool { self.0.is_negative() } } @@ -836,7 +836,7 @@ assert_eq!(n.leading_zeros(), 2); ```"), #[inline] #[unstable(feature = "wrapping_int_impl", issue = "32463")] - pub fn leading_zeros(self) -> u32 { + pub const fn leading_zeros(self) -> u32 { self.0.leading_zeros() } } diff --git a/src/libcore/ops/mod.rs b/src/libcore/ops/mod.rs index ce4f45762d..edfa6df11a 100644 --- a/src/libcore/ops/mod.rs +++ b/src/libcore/ops/mod.rs @@ -201,3 +201,6 @@ pub use self::generator::{Generator, GeneratorState}; #[unstable(feature = "coerce_unsized", issue = "27732")] pub use self::unsize::CoerceUnsized; + +#[unstable(feature = "dispatch_from_dyn", issue = "0")] +pub use self::unsize::DispatchFromDyn; diff --git a/src/libcore/ops/range.rs b/src/libcore/ops/range.rs index fd3e50998f..908490e1c8 100644 --- a/src/libcore/ops/range.rs +++ b/src/libcore/ops/range.rs @@ -391,7 +391,7 @@ impl RangeInclusive { /// ``` #[stable(feature = "inclusive_range_methods", since = "1.27.0")] #[inline] - #[cfg_attr(not(stage0), rustc_promotable)] + #[rustc_promotable] pub const fn new(start: Idx, end: Idx) -> Self { Self { start, end, is_empty: None } } @@ -416,7 +416,7 @@ impl RangeInclusive { /// ``` #[stable(feature = "inclusive_range_methods", since = "1.27.0")] #[inline] - pub fn start(&self) -> &Idx { + pub const fn start(&self) -> &Idx { &self.start } @@ -440,7 +440,7 @@ impl RangeInclusive { /// ``` #[stable(feature = "inclusive_range_methods", since = "1.27.0")] #[inline] - pub fn end(&self) -> &Idx { + pub const fn end(&self) -> &Idx { &self.end } diff --git a/src/libcore/ops/unsize.rs b/src/libcore/ops/unsize.rs index da72f37484..4d9a40a1b9 100644 --- a/src/libcore/ops/unsize.rs +++ b/src/libcore/ops/unsize.rs @@ -43,7 +43,7 @@ use marker::Unsize; /// [nomicon-coerce]: ../../nomicon/coercions.html #[unstable(feature = "coerce_unsized", issue = "27732")] #[lang = "coerce_unsized"] -pub trait CoerceUnsized { +pub trait CoerceUnsized { // Empty. } @@ -77,3 +77,37 @@ impl, U: ?Sized> CoerceUnsized<*const U> for *mut T {} // *const T -> *const U #[unstable(feature = "coerce_unsized", issue = "27732")] impl, U: ?Sized> CoerceUnsized<*const U> for *const T {} + + +/// This is used for object safety, to check that a method's receiver type can be dispatched on. +/// +/// example impl: +/// +/// ``` +/// # #![feature(dispatch_from_dyn, unsize)] +/// # use std::{ops::DispatchFromDyn, marker::Unsize}; +/// # struct Rc(::std::rc::Rc); +/// impl DispatchFromDyn> for Rc +/// where +/// T: Unsize, +/// {} +/// ``` +#[unstable(feature = "dispatch_from_dyn", issue = "0")] +#[cfg_attr(not(stage0), lang = "dispatch_from_dyn")] +pub trait DispatchFromDyn { + // Empty. 
+} + +// &T -> &U +#[unstable(feature = "dispatch_from_dyn", issue = "0")] +impl<'a, T: ?Sized+Unsize, U: ?Sized> DispatchFromDyn<&'a U> for &'a T {} +// &mut T -> &mut U +#[unstable(feature = "dispatch_from_dyn", issue = "0")] +impl<'a, T: ?Sized+Unsize, U: ?Sized> DispatchFromDyn<&'a mut U> for &'a mut T {} +// *const T -> *const U +#[unstable(feature = "dispatch_from_dyn", issue = "0")] +impl, U: ?Sized> DispatchFromDyn<*const U> for *const T {} +// *mut T -> *mut U +#[unstable(feature = "dispatch_from_dyn", issue = "0")] +impl, U: ?Sized> DispatchFromDyn<*mut U> for *mut T {} + diff --git a/src/libcore/panicking.rs b/src/libcore/panicking.rs index 58407de956..aa18a60fc0 100644 --- a/src/libcore/panicking.rs +++ b/src/libcore/panicking.rs @@ -39,9 +39,16 @@ use fmt; use panic::{Location, PanicInfo}; -#[cold] #[inline(never)] // this is the slow path, always +#[cold] +// never inline unless panic_immediate_abort to avoid code +// bloat at the call sites as much as possible +#[cfg_attr(not(feature="panic_immediate_abort"),inline(never))] #[lang = "panic"] pub fn panic(expr_file_line_col: &(&'static str, &'static str, u32, u32)) -> ! { + if cfg!(feature = "panic_immediate_abort") { + unsafe { super::intrinsics::abort() } + } + // Use Arguments::new_v1 instead of format_args!("{}", expr) to potentially // reduce size overhead. The format_args! macro uses str's Display trait to // write expr, which calls Formatter::pad, which must accommodate string @@ -52,16 +59,27 @@ pub fn panic(expr_file_line_col: &(&'static str, &'static str, u32, u32)) -> ! { panic_fmt(fmt::Arguments::new_v1(&[expr], &[]), &(file, line, col)) } -#[cold] #[inline(never)] +#[cold] +#[cfg_attr(not(feature="panic_immediate_abort"),inline(never))] #[lang = "panic_bounds_check"] fn panic_bounds_check(file_line_col: &(&'static str, u32, u32), index: usize, len: usize) -> ! { + if cfg!(feature = "panic_immediate_abort") { + unsafe { super::intrinsics::abort() } + } + panic_fmt(format_args!("index out of bounds: the len is {} but the index is {}", len, index), file_line_col) } -#[cold] #[inline(never)] +#[cold] +#[cfg_attr(not(feature="panic_immediate_abort"),inline(never))] +#[cfg_attr( feature="panic_immediate_abort" ,inline)] pub fn panic_fmt(fmt: fmt::Arguments, file_line_col: &(&'static str, u32, u32)) -> ! { + if cfg!(feature = "panic_immediate_abort") { + unsafe { super::intrinsics::abort() } + } + // NOTE This function never crosses the FFI boundary; it's a Rust-to-Rust call #[allow(improper_ctypes)] // PanicInfo contains a trait object which is not FFI safe extern "Rust" { diff --git a/src/libcore/pin.rs b/src/libcore/pin.rs index a03c080fb3..308dd9c79f 100644 --- a/src/libcore/pin.rs +++ b/src/libcore/pin.rs @@ -3,7 +3,7 @@ //! It is sometimes useful to have objects that are guaranteed to not move, //! in the sense that their placement in memory does not change, and can thus be relied upon. //! -//! A prime example of such a scenario would be building self-referencial structs, +//! A prime example of such a scenario would be building self-referential structs, //! since moving an object with pointers to itself will invalidate them, //! which could cause undefined behavior. //! @@ -39,7 +39,7 @@ //! use std::marker::Pinned; //! use std::ptr::NonNull; //! -//! // This is a self referencial struct since the slice field points to the data field. +//! // This is a self-referential struct since the slice field points to the data field. //! // We cannot inform the compiler about that with a normal reference, //! 
// since this pattern cannot be described with the usual borrowing rules. //! // Instead we use a raw pointer, though one which is known to not be null, @@ -91,7 +91,7 @@ use fmt; use marker::Sized; -use ops::{Deref, DerefMut, CoerceUnsized}; +use ops::{Deref, DerefMut, CoerceUnsized, DispatchFromDyn}; #[doc(inline)] pub use marker::Unpin; @@ -324,5 +324,11 @@ where P: CoerceUnsized, {} +#[unstable(feature = "pin", issue = "49150")] +impl<'a, P, U> DispatchFromDyn> for Pin
+where
+    P: DispatchFromDyn<U>,
+{}
+
 #[unstable(feature = "pin", issue = "49150")]
 impl<P> Unpin for Pin<P>
{} diff --git a/src/libcore/ptr.rs b/src/libcore/ptr.rs index ede24348fd..d3a74ed2a6 100644 --- a/src/libcore/ptr.rs +++ b/src/libcore/ptr.rs @@ -75,11 +75,11 @@ use convert::From; use intrinsics; -use ops::CoerceUnsized; +use ops::{CoerceUnsized, DispatchFromDyn}; use fmt; use hash; use marker::{PhantomData, Unsize}; -use mem; +use mem::{self, MaybeUninit}; use nonzero::NonZero; use cmp::Ordering::{self, Less, Equal, Greater}; @@ -120,7 +120,7 @@ pub use intrinsics::write_bytes; /// /// Additionally, if `T` is not [`Copy`], using the pointed-to value after /// calling `drop_in_place` can cause undefined behavior. Note that `*to_drop = -/// foo` counts as a use because it will cause the the value to be dropped +/// foo` counts as a use because it will cause the value to be dropped /// again. [`write`] can be used to overwrite data without causing it to be /// dropped. /// @@ -189,12 +189,22 @@ pub use intrinsics::write_bytes; /// i.e., you do not usually have to worry about such issues unless you call `drop_in_place` /// manually. #[stable(feature = "drop_in_place", since = "1.8.0")] +#[inline(always)] +pub unsafe fn drop_in_place(to_drop: *mut T) { + real_drop_in_place(&mut *to_drop) +} + +// The real `drop_in_place` -- the one that gets called implicitly when variables go +// out of scope -- should have a safe reference and not a raw pointer as argument +// type. When we drop a local variable, we access it with a pointer that behaves +// like a safe reference; transmuting that to a raw pointer does not mean we can +// actually access it with raw pointers. #[lang = "drop_in_place"] #[allow(unconditional_recursion)] -pub unsafe fn drop_in_place(to_drop: *mut T) { +unsafe fn real_drop_in_place(to_drop: &mut T) { // Code here does not matter - this is replaced by the // real drop glue by the compiler. - drop_in_place(to_drop); + real_drop_in_place(to_drop) } /// Creates a null raw pointer. @@ -209,7 +219,7 @@ pub unsafe fn drop_in_place(to_drop: *mut T) { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] -#[cfg_attr(not(stage0), rustc_promotable)] +#[rustc_promotable] pub const fn null() -> *const T { 0 as *const T } /// Creates a null mutable raw pointer. @@ -224,7 +234,7 @@ pub const fn null() -> *const T { 0 as *const T } /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] -#[cfg_attr(not(stage0), rustc_promotable)] +#[rustc_promotable] pub const fn null_mut() -> *mut T { 0 as *mut T } /// Swaps the values at two mutable locations of the same type, without @@ -295,17 +305,14 @@ pub const fn null_mut() -> *mut T { 0 as *mut T } #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub unsafe fn swap(x: *mut T, y: *mut T) { - // Give ourselves some scratch space to work with - let mut tmp: T = mem::uninitialized(); + // Give ourselves some scratch space to work with. + // We do not have to worry about drops: `MaybeUninit` does nothing when dropped. + let mut tmp = MaybeUninit::::uninitialized(); // Perform the swap - copy_nonoverlapping(x, &mut tmp, 1); + copy_nonoverlapping(x, tmp.as_mut_ptr(), 1); copy(y, x, 1); // `x` and `y` may overlap - copy_nonoverlapping(&tmp, y, 1); - - // y and t now point to the same thing, but we need to completely forget `tmp` - // because it's no longer relevant. 
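The scratch value inside `ptr::swap` now lives in a `MaybeUninit`, so the manual `mem::forget` step disappears; the caller-visible contract is unchanged, as in this stable-API sketch:

```rust
use std::ptr;

fn main() {
    let mut a = String::from("left");
    let mut b = String::from("right");
    // The temporary copy made inside `swap` is never dropped, so neither
    // string is freed twice.
    unsafe { ptr::swap(&mut a, &mut b) };
    assert_eq!((a.as_str(), b.as_str()), ("right", "left"));
}
```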
- mem::forget(tmp); + copy_nonoverlapping(tmp.get_ref(), y, 1); } /// Swaps `count * size_of::()` bytes between the two regions of memory @@ -371,7 +378,7 @@ pub(crate) unsafe fn swap_nonoverlapping_one(x: *mut T, y: *mut T) { #[inline] unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) { // The approach here is to utilize simd to swap x & y efficiently. Testing reveals - // that swapping either 32 bytes or 64 bytes at a time is most efficient for intel + // that swapping either 32 bytes or 64 bytes at a time is most efficient for Intel // Haswell E processors. LLVM is more able to optimize if we give a struct a // #[repr(simd)], even if we don't actually use this struct directly. // @@ -392,8 +399,8 @@ unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) { while i + block_size <= len { // Create some uninitialized memory as scratch space // Declaring `t` here avoids aligning the stack when this loop is unused - let mut t: Block = mem::uninitialized(); - let t = &mut t as *mut _ as *mut u8; + let mut t = mem::MaybeUninit::::uninitialized(); + let t = t.as_mut_ptr() as *mut u8; let x = x.add(i); let y = y.add(i); @@ -407,10 +414,10 @@ unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) { if i < len { // Swap any remaining bytes - let mut t: UnalignedBlock = mem::uninitialized(); + let mut t = mem::MaybeUninit::::uninitialized(); let rem = len - i; - let t = &mut t as *mut _ as *mut u8; + let t = t.as_mut_ptr() as *mut u8; let x = x.add(i); let y = y.add(i); @@ -575,9 +582,9 @@ pub unsafe fn replace(dst: *mut T, mut src: T) -> T { #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub unsafe fn read(src: *const T) -> T { - let mut tmp: T = mem::uninitialized(); - copy_nonoverlapping(src, &mut tmp, 1); - tmp + let mut tmp = MaybeUninit::::uninitialized(); + copy_nonoverlapping(src, tmp.as_mut_ptr(), 1); + tmp.into_inner() } /// Reads the value from `src` without moving it. This leaves the @@ -642,11 +649,11 @@ pub unsafe fn read(src: *const T) -> T { #[inline] #[stable(feature = "ptr_unaligned", since = "1.17.0")] pub unsafe fn read_unaligned(src: *const T) -> T { - let mut tmp: T = mem::uninitialized(); + let mut tmp = MaybeUninit::::uninitialized(); copy_nonoverlapping(src as *const u8, - &mut tmp as *mut T as *mut u8, + tmp.as_mut_ptr() as *mut u8, mem::size_of::()); - tmp + tmp.into_inner() } /// Overwrites a memory location with the given value without reading or @@ -1005,7 +1012,7 @@ impl *const T { /// # Null-unchecked version /// /// If you are sure the pointer can never be null and are looking for some kind of - /// `as_ref_unchecked` that returns the `&T` instead of `Option<&T>, know that you can + /// `as_ref_unchecked` that returns the `&T` instead of `Option<&T>`, know that you can /// dereference the pointer directly. /// /// ``` @@ -1625,7 +1632,7 @@ impl *mut T { /// # Null-unchecked version /// /// If you are sure the pointer can never be null and are looking for some kind of - /// `as_ref_unchecked` that returns the `&T` instead of `Option<&T>, know that you can + /// `as_ref_unchecked` that returns the `&T` instead of `Option<&T>`, know that you can /// dereference the pointer directly. 
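For the `as_ref` docs corrected above, the practical difference between the checked form and a plain dereference:

```rust
fn main() {
    let x = 7u32;
    let p: *const u32 = &x;
    let null: *const u32 = std::ptr::null();
    unsafe {
        // `as_ref` folds the null check into an `Option<&T>`.
        assert_eq!(p.as_ref(), Some(&7));
        assert_eq!(null.as_ref(), None);
        // For a pointer known to be non-null, a plain dereference also works.
        assert_eq!(*p, 7);
    }
}
```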
/// /// ``` @@ -2795,6 +2802,9 @@ impl Copy for Unique { } #[unstable(feature = "ptr_internals", issue = "0")] impl CoerceUnsized> for Unique where T: Unsize { } +#[unstable(feature = "ptr_internals", issue = "0")] +impl DispatchFromDyn> for Unique where T: Unsize { } + #[unstable(feature = "ptr_internals", issue = "0")] impl fmt::Pointer for Unique { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { @@ -2867,6 +2877,7 @@ impl NonNull { /// sentinel value. Types that lazily allocate must track initialization by /// some other means. #[stable(feature = "nonnull", since = "1.25.0")] + #[inline] pub fn dangling() -> Self { unsafe { let ptr = mem::align_of::() as *mut T; @@ -2882,12 +2893,14 @@ impl NonNull { /// /// `ptr` must be non-null. #[stable(feature = "nonnull", since = "1.25.0")] + #[inline] pub const unsafe fn new_unchecked(ptr: *mut T) -> Self { NonNull { pointer: NonZero(ptr as _) } } /// Creates a new `NonNull` if `ptr` is non-null. #[stable(feature = "nonnull", since = "1.25.0")] + #[inline] pub fn new(ptr: *mut T) -> Option { if !ptr.is_null() { Some(NonNull { pointer: NonZero(ptr as _) }) @@ -2898,7 +2911,8 @@ impl NonNull { /// Acquires the underlying `*mut` pointer. #[stable(feature = "nonnull", since = "1.25.0")] - pub fn as_ptr(self) -> *mut T { + #[inline] + pub const fn as_ptr(self) -> *mut T { self.pointer.0 as *mut T } @@ -2908,6 +2922,7 @@ impl NonNull { /// it were actually an instance of T that is getting borrowed. If a longer /// (unbound) lifetime is needed, use `&*my_ptr.as_ptr()`. #[stable(feature = "nonnull", since = "1.25.0")] + #[inline] pub unsafe fn as_ref(&self) -> &T { &*self.as_ptr() } @@ -2918,12 +2933,14 @@ impl NonNull { /// it were actually an instance of T that is getting borrowed. If a longer /// (unbound) lifetime is needed, use `&mut *my_ptr.as_ptr()`. 
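Several `NonNull` constructors and accessors gain `#[inline]` and `as_ptr` becomes `const`; none of that changes how the type is used, roughly as follows (stable calls only):

```rust
use std::ptr::NonNull;

fn main() {
    let mut value = 5_i32;
    let mut ptr = NonNull::from(&mut value);
    // `cast` changes the pointee type without touching the address.
    assert_eq!(ptr.cast::<u8>().as_ptr() as usize, ptr.as_ptr() as usize);
    // `as_mut` hands back a real `&mut i32`; validity is on the caller.
    unsafe { *ptr.as_mut() += 1 };
    assert_eq!(value, 6);
}
```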
#[stable(feature = "nonnull", since = "1.25.0")] + #[inline] pub unsafe fn as_mut(&mut self) -> &mut T { &mut *self.as_ptr() } /// Cast to a pointer of another type #[stable(feature = "nonnull_cast", since = "1.27.0")] + #[inline] pub fn cast(self) -> NonNull { unsafe { NonNull::new_unchecked(self.as_ptr() as *mut U) @@ -2944,6 +2961,9 @@ impl Copy for NonNull { } #[unstable(feature = "coerce_unsized", issue = "27732")] impl CoerceUnsized> for NonNull where T: Unsize { } +#[unstable(feature = "dispatch_from_dyn", issue = "0")] +impl DispatchFromDyn> for NonNull where T: Unsize { } + #[stable(feature = "nonnull", since = "1.25.0")] impl fmt::Debug for NonNull { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { @@ -2963,6 +2983,7 @@ impl Eq for NonNull {} #[stable(feature = "nonnull", since = "1.25.0")] impl PartialEq for NonNull { + #[inline] fn eq(&self, other: &Self) -> bool { self.as_ptr() == other.as_ptr() } @@ -2970,6 +2991,7 @@ impl PartialEq for NonNull { #[stable(feature = "nonnull", since = "1.25.0")] impl Ord for NonNull { + #[inline] fn cmp(&self, other: &Self) -> Ordering { self.as_ptr().cmp(&other.as_ptr()) } @@ -2977,6 +2999,7 @@ impl Ord for NonNull { #[stable(feature = "nonnull", since = "1.25.0")] impl PartialOrd for NonNull { + #[inline] fn partial_cmp(&self, other: &Self) -> Option { self.as_ptr().partial_cmp(&other.as_ptr()) } @@ -2984,6 +3007,7 @@ impl PartialOrd for NonNull { #[stable(feature = "nonnull", since = "1.25.0")] impl hash::Hash for NonNull { + #[inline] fn hash(&self, state: &mut H) { self.as_ptr().hash(state) } @@ -2991,6 +3015,7 @@ impl hash::Hash for NonNull { #[unstable(feature = "ptr_internals", issue = "0")] impl From> for NonNull { + #[inline] fn from(unique: Unique) -> Self { NonNull { pointer: unique.pointer } } @@ -2998,6 +3023,7 @@ impl From> for NonNull { #[stable(feature = "nonnull", since = "1.25.0")] impl<'a, T: ?Sized> From<&'a mut T> for NonNull { + #[inline] fn from(reference: &'a mut T) -> Self { NonNull { pointer: NonZero(reference as _) } } @@ -3005,6 +3031,7 @@ impl<'a, T: ?Sized> From<&'a mut T> for NonNull { #[stable(feature = "nonnull", since = "1.25.0")] impl<'a, T: ?Sized> From<&'a T> for NonNull { + #[inline] fn from(reference: &'a T) -> Self { NonNull { pointer: NonZero(reference as _) } } diff --git a/src/libcore/raw.rs b/src/libcore/raw.rs index 495b9afe86..3d4bccb4f9 100644 --- a/src/libcore/raw.rs +++ b/src/libcore/raw.rs @@ -21,7 +21,7 @@ /// The representation of a trait object like `&SomeTrait`. /// /// This struct has the same layout as types like `&SomeTrait` and -/// `Box`. +/// `Box`. /// /// `TraitObject` is guaranteed to match layouts, but it is not the /// type of trait objects (e.g. the fields are not directly accessible diff --git a/src/libcore/slice/mod.rs b/src/libcore/slice/mod.rs index 8a6b212020..8c55a16f3c 100644 --- a/src/libcore/slice/mod.rs +++ b/src/libcore/slice/mod.rs @@ -385,7 +385,6 @@ impl [T] { /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] - #[rustc_const_unstable(feature = "const_slice_as_ptr")] pub const fn as_ptr(&self) -> *const T { self as *const [T] as *const T } @@ -1511,6 +1510,22 @@ impl [T] { /// This sort is unstable (i.e. may reorder equal elements), in-place (i.e. does not allocate), /// and `O(n log n)` worst-case. /// + /// The comparator function must define a total ordering for the elements in the slice. If + /// the ordering is not total, the order of the elements is unspecified. 
An order is a + /// total order if it is (for all a, b and c): + /// + /// * total and antisymmetric: exactly one of a < b, a == b or a > b is true; and + /// * transitive, a < b and b < c implies a < c. The same must hold for both == and >. + /// + /// For example, while [`f64`] doesn't implement [`Ord`] because `NaN != NaN`, we can use + /// `partial_cmp` as our sort function when we know the slice doesn't contain a `NaN`. + /// + /// ``` + /// let mut floats = [5f64, 4.0, 1.0, 3.0, 2.0]; + /// floats.sort_by(|a, b| a.partial_cmp(b).unwrap()); + /// assert_eq!(floats, [1.0, 2.0, 3.0, 4.0, 5.0]); + /// ``` + /// /// # Current implementation /// /// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters, diff --git a/src/libcore/slice/rotate.rs b/src/libcore/slice/rotate.rs index 0d182b8497..0715373530 100644 --- a/src/libcore/slice/rotate.rs +++ b/src/libcore/slice/rotate.rs @@ -9,7 +9,7 @@ // except according to those terms. use cmp; -use mem; +use mem::{self, MaybeUninit}; use ptr; /// Rotation is much faster if it has access to a little bit of memory. This @@ -26,12 +26,6 @@ union RawArray { } impl RawArray { - fn new() -> Self { - unsafe { mem::uninitialized() } - } - fn ptr(&self) -> *mut T { - unsafe { &self.typed as *const T as *mut T } - } fn cap() -> usize { if mem::size_of::() == 0 { usize::max_value() @@ -88,8 +82,8 @@ pub unsafe fn ptr_rotate(mut left: usize, mid: *mut T, mut right: usize) { } } - let rawarray = RawArray::new(); - let buf = rawarray.ptr(); + let mut rawarray = MaybeUninit::>::uninitialized(); + let buf = &mut (*rawarray.as_mut_ptr()).typed as *mut [T; 2] as *mut T; let dim = mid.sub(left).add(right); if left <= right { diff --git a/src/libcore/slice/sort.rs b/src/libcore/slice/sort.rs index e4c1fd03f9..affe84fbef 100644 --- a/src/libcore/slice/sort.rs +++ b/src/libcore/slice/sort.rs @@ -17,7 +17,7 @@ //! stable sorting implementation. use cmp; -use mem; +use mem::{self, MaybeUninit}; use ptr; /// When dropped, copies from `src` into `dest`. @@ -226,14 +226,14 @@ fn partition_in_blocks(v: &mut [T], pivot: &T, is_less: &mut F) -> usize let mut block_l = BLOCK; let mut start_l = ptr::null_mut(); let mut end_l = ptr::null_mut(); - let mut offsets_l: [u8; BLOCK] = unsafe { mem::uninitialized() }; + let mut offsets_l = MaybeUninit::<[u8; BLOCK]>::uninitialized(); // The current block on the right side (from `r.sub(block_r)` to `r`). let mut r = unsafe { l.add(v.len()) }; let mut block_r = BLOCK; let mut start_r = ptr::null_mut(); let mut end_r = ptr::null_mut(); - let mut offsets_r: [u8; BLOCK] = unsafe { mem::uninitialized() }; + let mut offsets_r = MaybeUninit::<[u8; BLOCK]>::uninitialized(); // FIXME: When we get VLAs, try creating one array of length `min(v.len(), 2 * BLOCK)` rather // than two fixed-size arrays of length `BLOCK`. VLAs might be more cache-efficient. @@ -272,8 +272,8 @@ fn partition_in_blocks(v: &mut [T], pivot: &T, is_less: &mut F) -> usize if start_l == end_l { // Trace `block_l` elements from the left side. - start_l = offsets_l.as_mut_ptr(); - end_l = offsets_l.as_mut_ptr(); + start_l = offsets_l.as_mut_ptr() as *mut u8; + end_l = offsets_l.as_mut_ptr() as *mut u8; let mut elem = l; for i in 0..block_l { @@ -288,8 +288,8 @@ fn partition_in_blocks(v: &mut [T], pivot: &T, is_less: &mut F) -> usize if start_r == end_r { // Trace `block_r` elements from the right side. 
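The comparator contract spelled out for the sorts above only has to be a total order over the elements actually present; ordering integers by absolute value is a small example of a custom comparator that satisfies it:

```rust
fn main() {
    let mut v = [5i32, 4, 1, -3, 2];
    v.sort_unstable_by(|a, b| a.abs().cmp(&b.abs()));
    assert_eq!(v, [1, 2, -3, 4, 5]);
}
```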
- start_r = offsets_r.as_mut_ptr(); - end_r = offsets_r.as_mut_ptr(); + start_r = offsets_r.as_mut_ptr() as *mut u8; + end_r = offsets_r.as_mut_ptr() as *mut u8; let mut elem = r; for i in 0..block_r { diff --git a/src/libcore/str/lossy.rs b/src/libcore/str/lossy.rs index 186d6adbc9..52abd8f995 100644 --- a/src/libcore/str/lossy.rs +++ b/src/libcore/str/lossy.rs @@ -62,18 +62,15 @@ impl<'a> Iterator for Utf8LossyChunksIter<'a> { } const TAG_CONT_U8: u8 = 128; - fn unsafe_get(xs: &[u8], i: usize) -> u8 { - unsafe { *xs.get_unchecked(i) } - } fn safe_get(xs: &[u8], i: usize) -> u8 { - if i >= xs.len() { 0 } else { unsafe_get(xs, i) } + *xs.get(i).unwrap_or(&0) } let mut i = 0; while i < self.source.len() { let i_ = i; - let byte = unsafe_get(self.source, i); + let byte = unsafe { *self.source.get_unchecked(i) }; i += 1; if byte < 128 { diff --git a/src/libcore/str/mod.rs b/src/libcore/str/mod.rs index a2782dd8e2..89efa120a6 100644 --- a/src/libcore/str/mod.rs +++ b/src/libcore/str/mod.rs @@ -1424,10 +1424,8 @@ fn contains_nonascii(x: usize) -> bool { (x & NONASCII_MASK) != 0 } -/// Walks through `iter` checking that it's a valid UTF-8 sequence, -/// returning `true` in that case, or, if it is invalid, `false` with -/// `iter` reset such that it is pointing at the first byte in the -/// invalid sequence. +/// Walks through `v` checking that it's a valid UTF-8 sequence, +/// returning `Ok(())` in that case, or, if it is invalid, `Err(err)`. #[inline] fn run_utf8_validation(v: &[u8]) -> Result<(), Utf8Error> { let mut index = 0; @@ -1896,7 +1894,7 @@ mod traits { #[inline] fn index_mut(self, slice: &mut str) -> &mut Self::Output { // is_char_boundary checks that the index is in [0, .len()] - // canot reuse `get` as above, because of NLL trouble + // cannot reuse `get` as above, because of NLL trouble if self.start <= self.end && slice.is_char_boundary(self.start) && slice.is_char_boundary(self.end) { @@ -2277,7 +2275,6 @@ impl str { /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] - #[rustc_const_unstable(feature = "const_str_as_ptr")] pub const fn as_ptr(&self) -> *const u8 { self as *const str as *const u8 } diff --git a/src/libcore/sync/atomic.rs b/src/libcore/sync/atomic.rs index f130dbfb0e..27eeb045bb 100644 --- a/src/libcore/sync/atomic.rs +++ b/src/libcore/sync/atomic.rs @@ -124,6 +124,7 @@ pub fn spin_loop_hint() { /// [`bool`]: ../../../std/primitive.bool.html #[cfg(target_has_atomic = "8")] #[stable(feature = "rust1", since = "1.0.0")] +#[repr(C, align(1))] pub struct AtomicBool { v: UnsafeCell, } @@ -147,6 +148,9 @@ unsafe impl Sync for AtomicBool {} /// This type has the same in-memory representation as a `*mut T`. #[cfg(target_has_atomic = "ptr")] #[stable(feature = "rust1", since = "1.0.0")] +#[cfg_attr(target_pointer_width = "16", repr(C, align(2)))] +#[cfg_attr(target_pointer_width = "32", repr(C, align(4)))] +#[cfg_attr(target_pointer_width = "64", repr(C, align(8)))] pub struct AtomicPtr { p: UnsafeCell<*mut T>, } @@ -169,11 +173,11 @@ unsafe impl Sync for AtomicPtr {} /// Atomic memory orderings /// -/// Memory orderings limit the ways that both the compiler and CPU may reorder -/// instructions around atomic operations. At its most restrictive, -/// "sequentially consistent" atomics allow neither reads nor writes -/// to be moved either before or after the atomic operation; on the other end -/// "relaxed" atomics allow all reorderings. +/// Memory orderings specify the way atomic operations synchronize memory. 
+/// In its weakest [`Relaxed`][Ordering::Relaxed], only the memory directly touched by the +/// operation is synchronized. On the other hand, a store-load pair of [`SeqCst`][Ordering::SeqCst] +/// operations synchronize other memory while additionally preserving a total order of such +/// operations across all threads. /// /// Rust's memory orderings are [the same as /// LLVM's](https://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations). @@ -181,6 +185,8 @@ unsafe impl Sync for AtomicPtr {} /// For more information see the [nomicon]. /// /// [nomicon]: ../../../nomicon/atomics.html +/// [Ordering::Relaxed]: #variant.Relaxed +/// [Ordering::SeqCst]: #variant.SeqCst #[stable(feature = "rust1", since = "1.0.0")] #[derive(Copy, Clone, Debug)] #[non_exhaustive] @@ -230,8 +236,8 @@ pub enum Ordering { /// For loads it uses [`Acquire`] ordering. For stores it uses the [`Release`] ordering. /// /// Notice that in the case of `compare_and_swap`, it is possible that the operation ends up - /// not performing any store and hence it has just `Acquire` ordering. However, - /// `AcqRel` will never perform [`Relaxed`] accesses. + /// not performing any store and hence it has just [`Acquire`] ordering. However, + /// [`AcqRel`][`AcquireRelease`] will never perform [`Relaxed`] accesses. /// /// This ordering is only applicable for operations that combine both loads and stores. /// @@ -1088,6 +1094,7 @@ macro_rules! atomic_int { $s_int_type:expr, $int_ref:expr, $extra_feature:expr, $min_fn:ident, $max_fn:ident, + $align:expr, $int_type:ident $atomic_type:ident $atomic_init:ident) => { /// An integer type which can be safely shared between threads. /// @@ -1101,6 +1108,7 @@ macro_rules! atomic_int { /// /// [module-level documentation]: index.html #[$stable] + #[repr(C, align($align))] pub struct $atomic_type { v: UnsafeCell<$int_type>, } @@ -1831,6 +1839,7 @@ atomic_int! { "i8", "../../../std/primitive.i8.html", "#![feature(integer_atomics)]\n\n", atomic_min, atomic_max, + 1, i8 AtomicI8 ATOMIC_I8_INIT } #[cfg(target_has_atomic = "8")] @@ -1844,6 +1853,7 @@ atomic_int! { "u8", "../../../std/primitive.u8.html", "#![feature(integer_atomics)]\n\n", atomic_umin, atomic_umax, + 1, u8 AtomicU8 ATOMIC_U8_INIT } #[cfg(target_has_atomic = "16")] @@ -1857,6 +1867,7 @@ atomic_int! { "i16", "../../../std/primitive.i16.html", "#![feature(integer_atomics)]\n\n", atomic_min, atomic_max, + 2, i16 AtomicI16 ATOMIC_I16_INIT } #[cfg(target_has_atomic = "16")] @@ -1870,6 +1881,7 @@ atomic_int! { "u16", "../../../std/primitive.u16.html", "#![feature(integer_atomics)]\n\n", atomic_umin, atomic_umax, + 2, u16 AtomicU16 ATOMIC_U16_INIT } #[cfg(target_has_atomic = "32")] @@ -1883,6 +1895,7 @@ atomic_int! { "i32", "../../../std/primitive.i32.html", "#![feature(integer_atomics)]\n\n", atomic_min, atomic_max, + 4, i32 AtomicI32 ATOMIC_I32_INIT } #[cfg(target_has_atomic = "32")] @@ -1896,6 +1909,7 @@ atomic_int! { "u32", "../../../std/primitive.u32.html", "#![feature(integer_atomics)]\n\n", atomic_umin, atomic_umax, + 4, u32 AtomicU32 ATOMIC_U32_INIT } #[cfg(target_has_atomic = "64")] @@ -1909,6 +1923,7 @@ atomic_int! { "i64", "../../../std/primitive.i64.html", "#![feature(integer_atomics)]\n\n", atomic_min, atomic_max, + 8, i64 AtomicI64 ATOMIC_I64_INIT } #[cfg(target_has_atomic = "64")] @@ -1922,8 +1937,49 @@ atomic_int! 
{ "u64", "../../../std/primitive.u64.html", "#![feature(integer_atomics)]\n\n", atomic_umin, atomic_umax, + 8, u64 AtomicU64 ATOMIC_U64_INIT } +#[cfg(all(not(stage0), target_has_atomic = "128"))] +atomic_int! { + unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), + "i128", "../../../std/primitive.i128.html", + "#![feature(integer_atomics)]\n\n", + atomic_min, atomic_max, + 16, + i128 AtomicI128 ATOMIC_I128_INIT +} +#[cfg(all(not(stage0), target_has_atomic = "128"))] +atomic_int! { + unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), + "u128", "../../../std/primitive.u128.html", + "#![feature(integer_atomics)]\n\n", + atomic_umin, atomic_umax, + 16, + u128 AtomicU128 ATOMIC_U128_INIT +} +#[cfg(target_pointer_width = "16")] +macro_rules! ptr_width { + () => { 2 } +} +#[cfg(target_pointer_width = "32")] +macro_rules! ptr_width { + () => { 4 } +} +#[cfg(target_pointer_width = "64")] +macro_rules! ptr_width { + () => { 8 } +} #[cfg(target_has_atomic = "ptr")] atomic_int!{ stable(feature = "rust1", since = "1.0.0"), @@ -1935,6 +1991,7 @@ atomic_int!{ "isize", "../../../std/primitive.isize.html", "", atomic_min, atomic_max, + ptr_width!(), isize AtomicIsize ATOMIC_ISIZE_INIT } #[cfg(target_has_atomic = "ptr")] @@ -1948,6 +2005,7 @@ atomic_int!{ "usize", "../../../std/primitive.usize.html", "", atomic_umin, atomic_umax, + ptr_width!(), usize AtomicUsize ATOMIC_USIZE_INIT } diff --git a/src/libcore/task/wake.rs b/src/libcore/task/wake.rs index c9fb22e008..c0ce7255d6 100644 --- a/src/libcore/task/wake.rs +++ b/src/libcore/task/wake.rs @@ -108,7 +108,7 @@ impl Drop for Waker { /// is ready to be run. /// /// This is similar to the `Waker` type, but cannot be sent across threads. -/// Task executors can use this type to implement more optimized singlethreaded wakeup +/// Task executors can use this type to implement more optimized single-threaded wakeup /// behavior. 
#[repr(transparent)] #[derive(Clone)] diff --git a/src/libcore/tests/iter.rs b/src/libcore/tests/iter.rs index ec09071b3d..495483db55 100644 --- a/src/libcore/tests/iter.rs +++ b/src/libcore/tests/iter.rs @@ -1759,6 +1759,17 @@ fn test_repeat_with_take_collect() { assert_eq!(v, vec![1, 2, 4, 8, 16]); } +#[test] +fn test_successors() { + let mut powers_of_10 = successors(Some(1_u16), |n| n.checked_mul(10)); + assert_eq!(powers_of_10.by_ref().collect::>(), &[1, 10, 100, 1_000, 10_000]); + assert_eq!(powers_of_10.next(), None); + + let mut empty = successors(None::, |_| unimplemented!()); + assert_eq!(empty.next(), None); + assert_eq!(empty.next(), None); +} + #[test] fn test_fuse() { let mut it = 0..3; diff --git a/src/libcore/tests/lib.rs b/src/libcore/tests/lib.rs index d340924aab..7d62b4fa90 100644 --- a/src/libcore/tests/lib.rs +++ b/src/libcore/tests/lib.rs @@ -19,7 +19,7 @@ #![feature(flt2dec)] #![feature(fmt_internals)] #![feature(hashmap_internals)] -#![cfg_attr(stage0, feature(impl_header_lifetime_elision))] +#![feature(iter_unfold)] #![feature(pattern)] #![feature(range_is_empty)] #![feature(raw)] diff --git a/src/libcore/tests/num/mod.rs b/src/libcore/tests/num/mod.rs index ab96d3126b..0928f7560e 100644 --- a/src/libcore/tests/num/mod.rs +++ b/src/libcore/tests/num/mod.rs @@ -82,36 +82,28 @@ fn from_str_issue7588() { #[test] fn test_int_from_str_overflow() { - let mut i8_val: i8 = 127; - assert_eq!("127".parse::().ok(), Some(i8_val)); + assert_eq!("127".parse::().ok(), Some(127i8)); assert_eq!("128".parse::().ok(), None); - i8_val = i8_val.wrapping_add(1); - assert_eq!("-128".parse::().ok(), Some(i8_val)); + assert_eq!("-128".parse::().ok(), Some(-128i8)); assert_eq!("-129".parse::().ok(), None); - let mut i16_val: i16 = 32_767; - assert_eq!("32767".parse::().ok(), Some(i16_val)); + assert_eq!("32767".parse::().ok(), Some(32_767i16)); assert_eq!("32768".parse::().ok(), None); - i16_val = i16_val.wrapping_add(1); - assert_eq!("-32768".parse::().ok(), Some(i16_val)); + assert_eq!("-32768".parse::().ok(), Some(-32_768i16)); assert_eq!("-32769".parse::().ok(), None); - let mut i32_val: i32 = 2_147_483_647; - assert_eq!("2147483647".parse::().ok(), Some(i32_val)); + assert_eq!("2147483647".parse::().ok(), Some(2_147_483_647i32)); assert_eq!("2147483648".parse::().ok(), None); - i32_val = i32_val.wrapping_add(1); - assert_eq!("-2147483648".parse::().ok(), Some(i32_val)); + assert_eq!("-2147483648".parse::().ok(), Some(-2_147_483_648i32)); assert_eq!("-2147483649".parse::().ok(), None); - let mut i64_val: i64 = 9_223_372_036_854_775_807; - assert_eq!("9223372036854775807".parse::().ok(), Some(i64_val)); + assert_eq!("9223372036854775807".parse::().ok(), Some(9_223_372_036_854_775_807i64)); assert_eq!("9223372036854775808".parse::().ok(), None); - i64_val = i64_val.wrapping_add(1); - assert_eq!("-9223372036854775808".parse::().ok(), Some(i64_val)); + assert_eq!("-9223372036854775808".parse::().ok(), Some(-9_223_372_036_854_775_808i64)); assert_eq!("-9223372036854775809".parse::().ok(), None); } diff --git a/src/libcore/time.rs b/src/libcore/time.rs index 81ae8ade12..938e97503d 100644 --- a/src/libcore/time.rs +++ b/src/libcore/time.rs @@ -109,7 +109,7 @@ impl Duration { /// ``` #[stable(feature = "duration", since = "1.3.0")] #[inline] - #[cfg_attr(not(stage0), rustc_promotable)] + #[rustc_promotable] pub const fn from_secs(secs: u64) -> Duration { Duration { secs, nanos: 0 } } @@ -128,7 +128,7 @@ impl Duration { /// ``` #[stable(feature = "duration", since = "1.3.0")] #[inline] - 
#[cfg_attr(not(stage0), rustc_promotable)] + #[rustc_promotable] pub const fn from_millis(millis: u64) -> Duration { Duration { secs: millis / MILLIS_PER_SEC, @@ -150,7 +150,7 @@ impl Duration { /// ``` #[stable(feature = "duration_from_micros", since = "1.27.0")] #[inline] - #[cfg_attr(not(stage0), rustc_promotable)] + #[rustc_promotable] pub const fn from_micros(micros: u64) -> Duration { Duration { secs: micros / MICROS_PER_SEC, @@ -172,7 +172,7 @@ impl Duration { /// ``` #[stable(feature = "duration_extras", since = "1.27.0")] #[inline] - #[cfg_attr(not(stage0), rustc_promotable)] + #[rustc_promotable] pub const fn from_nanos(nanos: u64) -> Duration { Duration { secs: nanos / (NANOS_PER_SEC as u64), @@ -209,7 +209,6 @@ impl Duration { /// /// [`subsec_nanos`]: #method.subsec_nanos #[stable(feature = "duration", since = "1.3.0")] - #[rustc_const_unstable(feature="duration_getters")] #[inline] pub const fn as_secs(&self) -> u64 { self.secs } @@ -229,7 +228,6 @@ impl Duration { /// assert_eq!(duration.subsec_millis(), 432); /// ``` #[stable(feature = "duration_extras", since = "1.27.0")] - #[rustc_const_unstable(feature="duration_getters")] #[inline] pub const fn subsec_millis(&self) -> u32 { self.nanos / NANOS_PER_MILLI } @@ -249,7 +247,6 @@ impl Duration { /// assert_eq!(duration.subsec_micros(), 234_567); /// ``` #[stable(feature = "duration_extras", since = "1.27.0")] - #[rustc_const_unstable(feature="duration_getters")] #[inline] pub const fn subsec_micros(&self) -> u32 { self.nanos / NANOS_PER_MICRO } @@ -269,7 +266,6 @@ impl Duration { /// assert_eq!(duration.subsec_nanos(), 10_000_000); /// ``` #[stable(feature = "duration", since = "1.3.0")] - #[rustc_const_unstable(feature="duration_getters")] #[inline] pub const fn subsec_nanos(&self) -> u32 { self.nanos } @@ -286,7 +282,7 @@ impl Duration { /// ``` #[unstable(feature = "duration_as_u128", issue = "50202")] #[inline] - pub fn as_millis(&self) -> u128 { + pub const fn as_millis(&self) -> u128 { self.secs as u128 * MILLIS_PER_SEC as u128 + (self.nanos / NANOS_PER_MILLI) as u128 } @@ -303,7 +299,7 @@ impl Duration { /// ``` #[unstable(feature = "duration_as_u128", issue = "50202")] #[inline] - pub fn as_micros(&self) -> u128 { + pub const fn as_micros(&self) -> u128 { self.secs as u128 * MICROS_PER_SEC as u128 + (self.nanos / NANOS_PER_MICRO) as u128 } @@ -320,7 +316,7 @@ impl Duration { /// ``` #[unstable(feature = "duration_as_u128", issue = "50202")] #[inline] - pub fn as_nanos(&self) -> u128 { + pub const fn as_nanos(&self) -> u128 { self.secs as u128 * NANOS_PER_SEC as u128 + self.nanos as u128 } @@ -478,7 +474,7 @@ impl Duration { /// ``` #[unstable(feature = "duration_float", issue = "54361")] #[inline] - pub fn as_float_secs(&self) -> f64 { + pub const fn as_float_secs(&self) -> f64 { (self.secs as f64) + (self.nanos as f64) / (NANOS_PER_SEC as f64) } diff --git a/src/libcore/unicode/tables.rs b/src/libcore/unicode/tables.rs index 3de855ac94..e525c05740 100644 --- a/src/libcore/unicode/tables.rs +++ b/src/libcore/unicode/tables.rs @@ -2598,4 +2598,3 @@ pub mod conversions { ]; } - diff --git a/src/liblibc/.travis.yml b/src/liblibc/.travis.yml index 1905b0d4fb..c2f9c073e1 100644 --- a/src/liblibc/.travis.yml +++ b/src/liblibc/.travis.yml @@ -69,7 +69,6 @@ matrix: RUSTFLAGS=-Clink-arg=-mios-simulator-version-min=7.0 before_install: rustc ./ci/ios/deploy_and_run_on_ios_simulator.rs -o $HOME/runtest - - env: TARGET=x86_64-rumprun-netbsd - env: TARGET=powerpc-unknown-linux-gnu - env: TARGET=powerpc64-unknown-linux-gnu - env: 
TARGET=powerpc64le-unknown-linux-gnu diff --git a/src/liblibc/Cargo.lock b/src/liblibc/Cargo.lock index c47d80d81f..d2c9f9f9b0 100644 --- a/src/liblibc/Cargo.lock +++ b/src/liblibc/Cargo.lock @@ -20,7 +20,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "ctest" -version = "0.2.0" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cc 1.0.18 (registry+https://github.com/rust-lang/crates.io-index)", @@ -85,7 +85,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" name = "libc-test" version = "0.1.0" dependencies = [ - "ctest 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "ctest 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.43", ] @@ -281,7 +281,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" "checksum bitflags 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "d0c54bb8f454c567f21197eefcdbf5679d0bd99f2ddbe52e84c77061952e6789" "checksum cc 1.0.18 (registry+https://github.com/rust-lang/crates.io-index)" = "2119ea4867bd2b8ed3aecab467709720b2d55b1bcfe09f772fd68066eaf15275" "checksum cfg-if 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "0c4e7bb64a8ebb0d856483e1e682ea3422f883c5f5615a90d51a2c82fe87fdd3" -"checksum ctest 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bfb5b62c8bba3ca51cb21a3d3f8506074d1364ca5f53cf28ed1c07311bb1080c" +"checksum ctest 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "8642e4e5ef24727d537ae8961ea295a334d670eb0934b51f2a72592e5c50007f" "checksum dtoa 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "6d301140eb411af13d3115f9a562c85cc6b541ade9dfa314132244aaee7489dd" "checksum extprim 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "054bc2552b3f66fa8097e29e47255bfff583c08e737a67cbbb54b817ddaa5206" "checksum fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82" diff --git a/src/liblibc/Cargo.toml b/src/liblibc/Cargo.toml index a9f0f59603..9427da76cf 100644 --- a/src/liblibc/Cargo.toml +++ b/src/liblibc/Cargo.toml @@ -12,6 +12,7 @@ description = """ A library for types and bindings to native C functions often found in libc or other common platform libraries. """ +build = "build.rs" [badges] travis-ci = { repository = "rust-lang/libc" } diff --git a/src/liblibc/build.rs b/src/liblibc/build.rs new file mode 100644 index 0000000000..aa56ea0545 --- /dev/null +++ b/src/liblibc/build.rs @@ -0,0 +1,35 @@ +use std::env; +use std::process::Command; +use std::str; + +fn main() { + /* + * If `core::ffi::c_void` exists, libc can just re-export it. Otherwise, it + * must define an incompatible type to retain backwards-compatibility. + */ + if rustc_minor_version().expect("Failed to get rustc version") >= 31 { + println!("cargo:rustc-cfg=core_cvoid"); + } +} + +fn rustc_minor_version() -> Option { + macro_rules! 
otry { + ($e:expr) => { + match $e { + Some(e) => e, + None => return None, + } + }; + } + + let rustc = otry!(env::var_os("RUSTC")); + let output = otry!(Command::new(rustc).arg("--version").output().ok()); + let version = otry!(str::from_utf8(&output.stdout).ok()); + let mut pieces = version.split('.'); + + if pieces.next() != Some("rustc 1") { + return None; + } + + otry!(pieces.next()).parse().ok() +} diff --git a/src/liblibc/libc-test/Cargo.toml b/src/liblibc/libc-test/Cargo.toml index 79fcf5f01e..0ff335f87e 100644 --- a/src/liblibc/libc-test/Cargo.toml +++ b/src/liblibc/libc-test/Cargo.toml @@ -9,7 +9,7 @@ path = ".." default-features = false [build-dependencies] -ctest = "0.2" +ctest = "0.2.2" [features] default = [ "use_std" ] diff --git a/src/liblibc/libc-test/build.rs b/src/liblibc/libc-test/build.rs index d46372b44b..db6588b802 100644 --- a/src/liblibc/libc-test/build.rs +++ b/src/liblibc/libc-test/build.rs @@ -358,7 +358,15 @@ fn main() { "Dl_info" | "DIR" | "Elf32_Phdr" | - "Elf64_Phdr" => ty.to_string(), + "Elf64_Phdr" | + "Elf32_Shdr" | + "Elf64_Shdr" | + "Elf32_Sym" | + "Elf64_Sym" | + "Elf32_Ehdr" | + "Elf64_Ehdr" | + "Elf32_Chdr" | + "Elf64_Chdr" => ty.to_string(), // Fixup a few types on windows that don't actually exist. "time64_t" if windows => "__time64_t".to_string(), @@ -548,7 +556,8 @@ fn main() { "PD_CLOEXEC" | "PD_ALLOWED_AT_FORK" if freebsd => true, // These constants were added in FreeBSD 12 - "SF_USER_READAHEAD" if freebsd => true, + "SF_USER_READAHEAD" | + "SO_REUSEPORT_LB" if freebsd => true, // These OSX constants are removed in Sierra. // https://developer.apple.com/library/content/releasenotes/General/APIDiffsMacOS10_12/Swift/Darwin.html @@ -785,6 +794,14 @@ fn main() { } }); + cfg.skip_static(move |name| { + match name { + // Internal constant, not declared in any headers. + "__progname" if android => true, + _ => false, + } + }); + cfg.skip_fn_ptrcheck(move |name| { match name { // dllimport weirdness? @@ -840,7 +857,8 @@ fn main() { // fails on a lot of platforms. let mut cfg = ctest::TestGenerator::new(); cfg.skip_type(|_| true) - .skip_fn(|_| true); + .skip_fn(|_| true) + .skip_static(|_| true); if android || linux { // musl defines these directly in `fcntl.h` if musl { diff --git a/src/liblibc/src/lib.rs b/src/liblibc/src/lib.rs index 7f8e907aaf..c997960a4b 100644 --- a/src/liblibc/src/lib.rs +++ b/src/liblibc/src/lib.rs @@ -101,14 +101,14 @@ extern crate std as core; #[macro_use] mod macros; mod dox; +/* + * `c_void` should be defined for all targets except wasm. + */ +#[cfg(not(all(target_arch = "wasm32", not(target_os = "emscripten"))))] cfg_if! { - if #[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))] { - // empty ... - } else if #[cfg(target_os = "switch")] { - // On the Switch, we only define some useful universal types for - // convenience. Those can be found in the switch.rs file. + if #[cfg(core_cvoid)] { + pub use core::ffi::c_void; } else { - // Use repr(u8) as LLVM expects `void*` to be the same as `i8*` to help enable // more optimization opportunities around it recognizing things like // malloc/free. @@ -120,7 +120,16 @@ cfg_if! { #[doc(hidden)] __variant2, } + } +} +cfg_if! { + if #[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))] { + // empty ... + } else if #[cfg(target_os = "switch")] { + // On the Switch, we only define some useful universal types for + // convenience. Those can be found in the switch.rs file. 
+ } else { pub type int8_t = i8; pub type int16_t = i16; pub type int32_t = i32; diff --git a/src/liblibc/src/redox/mod.rs b/src/liblibc/src/redox/mod.rs index e32c4b385b..7c7f4229e6 100644 --- a/src/liblibc/src/redox/mod.rs +++ b/src/liblibc/src/redox/mod.rs @@ -118,15 +118,19 @@ pub const SIGPWR: ::c_int = 30; pub const SIGSYS: ::c_int = 31; extern { + pub fn chown(path: *const c_char, uid: uid_t, gid: gid_t) -> ::c_int; + pub fn close(fd: ::c_int) -> ::c_int; + pub fn fchown(fd: ::c_int, uid: ::uid_t, gid: ::gid_t) -> ::c_int; + pub fn fcntl(fd: ::c_int, cmd: ::c_int, ...) -> ::c_int; pub fn gethostname(name: *mut ::c_char, len: ::size_t) -> ::c_int; pub fn getpid() -> pid_t; pub fn memalign(align: ::size_t, size: ::size_t) -> *mut ::c_void; - pub fn read(fd: ::c_int, buf: *mut ::c_void, count: ::size_t) - -> ::ssize_t; + pub fn read(fd: ::c_int, buf: *mut ::c_void, count: ::size_t) -> ::ssize_t; + pub fn setenv(name: *const c_char, val: *const c_char, overwrite: ::c_int) + -> ::c_int; + pub fn unsetenv(name: *const c_char) -> ::c_int; pub fn write(fd: ::c_int, buf: *const ::c_void, count: ::size_t) -> ::ssize_t; - pub fn fcntl(fd: ::c_int, cmd: ::c_int, ...) -> ::c_int; - pub fn close(fd: ::c_int) -> ::c_int; } #[link(name = "c")] diff --git a/src/liblibc/src/switch.rs b/src/liblibc/src/switch.rs index c11379541e..d47d41b108 100644 --- a/src/liblibc/src/switch.rs +++ b/src/liblibc/src/switch.rs @@ -1,17 +1,5 @@ //! Switch C type definitions -// Use repr(u8) as LLVM expects `void*` to be the same as `i8*` to help enable -// more optimization opportunities around it recognizing things like -// malloc/free. -#[repr(u8)] -pub enum c_void { - // Two dummy variants so the #[repr] attribute can be used. - #[doc(hidden)] - __variant1, - #[doc(hidden)] - __variant2, -} - pub type int8_t = i8; pub type int16_t = i16; pub type int32_t = i32; diff --git a/src/liblibc/src/unix/bsd/apple/mod.rs b/src/liblibc/src/unix/bsd/apple/mod.rs index cf48528b4a..fe85eb92b2 100644 --- a/src/liblibc/src/unix/bsd/apple/mod.rs +++ b/src/liblibc/src/unix/bsd/apple/mod.rs @@ -747,6 +747,26 @@ pub const PROT_READ: ::c_int = 1; pub const PROT_WRITE: ::c_int = 2; pub const PROT_EXEC: ::c_int = 4; +pub const PT_TRACE_ME: ::c_int = 0; +pub const PT_READ_I: ::c_int = 1; +pub const PT_READ_D: ::c_int = 2; +pub const PT_READ_U: ::c_int = 3; +pub const PT_WRITE_I: ::c_int = 4; +pub const PT_WRITE_D: ::c_int = 5; +pub const PT_WRITE_U: ::c_int = 6; +pub const PT_CONTINUE: ::c_int = 7; +pub const PT_KILL: ::c_int = 8; +pub const PT_STEP: ::c_int = 9; +pub const PT_ATTACH: ::c_int = 10; +pub const PT_DETACH: ::c_int = 11; +pub const PT_SIGEXC: ::c_int = 12; +pub const PT_THUPDATE: ::c_int = 13; +pub const PT_ATTACHEXC: ::c_int = 14; + +pub const PT_FORCEQUOTA: ::c_int = 30; +pub const PT_DENY_ATTACH: ::c_int = 31; +pub const PT_FIRSTMACH: ::c_int = 32; + pub const MAP_FILE: ::c_int = 0x0000; pub const MAP_SHARED: ::c_int = 0x0001; pub const MAP_PRIVATE: ::c_int = 0x0002; @@ -2350,6 +2370,7 @@ extern { pub fn mprotect(addr: *mut ::c_void, len: ::size_t, prot: ::c_int) -> ::c_int; pub fn shm_open(name: *const ::c_char, oflag: ::c_int, ...) 
-> ::c_int; + pub fn ftok(pathname : *const c_char, proj_id : ::c_int) -> key_t; pub fn shmat(shmid: ::c_int, shmaddr: *const ::c_void, shmflg: ::c_int) -> *mut ::c_void; pub fn shmdt(shmaddr: *const ::c_void) -> ::c_int; @@ -2427,6 +2448,9 @@ extern { len: *mut ::off_t, hdtr: *mut ::sf_hdtr, flags: ::c_int) -> ::c_int; + pub fn futimens(fd: ::c_int, times: *const ::timespec) -> ::c_int; + pub fn utimensat(dirfd: ::c_int, path: *const ::c_char, + times: *const ::timespec, flag: ::c_int) -> ::c_int; pub fn openpty(amaster: *mut ::c_int, aslave: *mut ::c_int, name: *mut ::c_char, diff --git a/src/liblibc/src/unix/bsd/freebsdlike/dragonfly/mod.rs b/src/liblibc/src/unix/bsd/freebsdlike/dragonfly/mod.rs index 9e1082e53b..e192124b52 100644 --- a/src/liblibc/src/unix/bsd/freebsdlike/dragonfly/mod.rs +++ b/src/liblibc/src/unix/bsd/freebsdlike/dragonfly/mod.rs @@ -429,6 +429,8 @@ pub const NOTE_CHILD: ::uint32_t = 0x00000004; pub const SO_SNDSPACE: ::c_int = 0x100a; pub const SO_CPUHINT: ::c_int = 0x1030; +pub const PT_FIRSTMACH: ::c_int = 32; + // https://github.com/DragonFlyBSD/DragonFlyBSD/blob/master/sys/net/if.h#L101 pub const IFF_UP: ::c_int = 0x1; // interface is up pub const IFF_BROADCAST: ::c_int = 0x2; // broadcast address valid diff --git a/src/liblibc/src/unix/bsd/freebsdlike/freebsd/mod.rs b/src/liblibc/src/unix/bsd/freebsdlike/freebsd/mod.rs index a64dbc468f..a64241a1ae 100644 --- a/src/liblibc/src/unix/bsd/freebsdlike/freebsd/mod.rs +++ b/src/liblibc/src/unix/bsd/freebsdlike/freebsd/mod.rs @@ -457,6 +457,7 @@ pub const JAIL_SYS_INHERIT: ::c_int = 2; pub const SO_BINTIME: ::c_int = 0x2000; pub const SO_NO_OFFLOAD: ::c_int = 0x4000; pub const SO_NO_DDP: ::c_int = 0x8000; +pub const SO_REUSEPORT_LB: ::c_int = 0x10000; pub const SO_LABEL: ::c_int = 0x1009; pub const SO_PEERLABEL: ::c_int = 0x1010; pub const SO_LISTENQLIMIT: ::c_int = 0x1011; @@ -473,6 +474,39 @@ pub const LOCAL_CREDS: ::c_int = 2; pub const LOCAL_CONNWAIT: ::c_int = 4; pub const LOCAL_VENDOR: ::c_int = SO_VENDOR; +pub const PT_LWPINFO: ::c_int = 13; +pub const PT_GETNUMLWPS: ::c_int = 14; +pub const PT_GETLWPLIST: ::c_int = 15; +pub const PT_CLEARSTEP: ::c_int = 16; +pub const PT_SETSTEP: ::c_int = 17; +pub const PT_SUSPEND: ::c_int = 18; +pub const PT_RESUME: ::c_int = 19; +pub const PT_TO_SCE: ::c_int = 20; +pub const PT_TO_SCX: ::c_int = 21; +pub const PT_SYSCALL: ::c_int = 22; +pub const PT_FOLLOW_FORK: ::c_int = 23; +pub const PT_LWP_EVENTS: ::c_int = 24; +pub const PT_GET_EVENT_MASK: ::c_int = 25; +pub const PT_SET_EVENT_MASK: ::c_int = 26; +pub const PT_GETREGS: ::c_int = 33; +pub const PT_SETREGS: ::c_int = 34; +pub const PT_GETFPREGS: ::c_int = 35; +pub const PT_SETFPREGS: ::c_int = 36; +pub const PT_GETDBREGS: ::c_int = 37; +pub const PT_SETDBREGS: ::c_int = 38; +pub const PT_VM_TIMESTAMP: ::c_int = 40; +pub const PT_VM_ENTRY: ::c_int = 41; +pub const PT_FIRSTMACH: ::c_int = 64; + +pub const PTRACE_EXEC: ::c_int = 0x0001; +pub const PTRACE_SCE: ::c_int = 0x0002; +pub const PTRACE_SCX: ::c_int = 0x0004; +pub const PTRACE_SYSCALL: ::c_int = PTRACE_SCE | PTRACE_SCX; +pub const PTRACE_FORK: ::c_int = 0x0008; +pub const PTRACE_LWP: ::c_int = 0x0010; +pub const PTRACE_VFORK: ::c_int = 0x0020; +pub const PTRACE_DEFAULT: ::c_int = PTRACE_EXEC; + pub const AF_SLOW: ::c_int = 33; pub const AF_SCLUSTER: ::c_int = 34; pub const AF_ARP: ::c_int = 35; diff --git a/src/liblibc/src/unix/bsd/freebsdlike/mod.rs b/src/liblibc/src/unix/bsd/freebsdlike/mod.rs index 75a7a670e9..6325395365 100644 --- 
a/src/liblibc/src/unix/bsd/freebsdlike/mod.rs +++ b/src/liblibc/src/unix/bsd/freebsdlike/mod.rs @@ -624,6 +624,18 @@ pub const PF_NATM: ::c_int = AF_NATM; pub const PF_ATM: ::c_int = AF_ATM; pub const PF_NETGRAPH: ::c_int = AF_NETGRAPH; +pub const PT_TRACE_ME: ::c_int = 0; +pub const PT_READ_I: ::c_int = 1; +pub const PT_READ_D: ::c_int = 2; +pub const PT_WRITE_I: ::c_int = 4; +pub const PT_WRITE_D: ::c_int = 5; +pub const PT_CONTINUE: ::c_int = 7; +pub const PT_KILL: ::c_int = 8; +pub const PT_STEP: ::c_int = 9; +pub const PT_ATTACH: ::c_int = 10; +pub const PT_DETACH: ::c_int = 11; +pub const PT_IO: ::c_int = 12; + pub const SOMAXCONN: ::c_int = 128; pub const MSG_OOB: ::c_int = 0x00000001; @@ -1136,6 +1148,9 @@ extern { timeout: *const ::timespec) -> ::c_int; pub fn sigwaitinfo(set: *const sigset_t, info: *mut siginfo_t) -> ::c_int; + pub fn futimens(fd: ::c_int, times: *const ::timespec) -> ::c_int; + pub fn utimensat(dirfd: ::c_int, path: *const ::c_char, + times: *const ::timespec, flag: ::c_int) -> ::c_int; pub fn openpty(amaster: *mut ::c_int, aslave: *mut ::c_int, name: *mut ::c_char, @@ -1202,6 +1217,10 @@ extern { pub fn settimeofday(tv: *const ::timeval, tz: *const ::timezone) -> ::c_int; pub fn getdomainname(name: *mut ::c_char, len: ::c_int) -> ::c_int; pub fn setdomainname(name: *const ::c_char, len: ::c_int) -> ::c_int; + pub fn ptrace(request: ::c_int, + pid: ::pid_t, + addr: *mut ::c_char, + data: ::c_int) -> ::c_int; } cfg_if! { diff --git a/src/liblibc/src/unix/bsd/mod.rs b/src/liblibc/src/unix/bsd/mod.rs index 03ab319269..98d91d046c 100644 --- a/src/liblibc/src/unix/bsd/mod.rs +++ b/src/liblibc/src/unix/bsd/mod.rs @@ -215,6 +215,10 @@ pub const O_NDELAY: ::c_int = O_NONBLOCK; pub const F_GETOWN: ::c_int = 5; pub const F_SETOWN: ::c_int = 6; +pub const F_RDLCK: ::c_short = 1; +pub const F_UNLCK: ::c_short = 2; +pub const F_WRLCK: ::c_short = 3; + pub const MNT_FORCE: ::c_int = 0x80000; pub const Q_SYNC: ::c_int = 0x600; @@ -539,6 +543,7 @@ extern { attr: *const ::pthread_attr_t, f: extern fn(*mut ::c_void) -> *mut ::c_void, value: *mut ::c_void) -> ::c_int; + pub fn acct(filename: *const ::c_char) -> ::c_int; } cfg_if! 
{ diff --git a/src/liblibc/src/unix/bsd/netbsdlike/mod.rs b/src/liblibc/src/unix/bsd/netbsdlike/mod.rs index 6384a29f8c..684d1a93f7 100644 --- a/src/liblibc/src/unix/bsd/netbsdlike/mod.rs +++ b/src/liblibc/src/unix/bsd/netbsdlike/mod.rs @@ -314,6 +314,17 @@ pub const POSIX_MADV_DONTNEED : ::c_int = 4; pub const PTHREAD_CREATE_JOINABLE : ::c_int = 0; pub const PTHREAD_CREATE_DETACHED : ::c_int = 1; +pub const PT_TRACE_ME: ::c_int = 0; +pub const PT_READ_I: ::c_int = 1; +pub const PT_READ_D: ::c_int = 2; +pub const PT_WRITE_I: ::c_int = 4; +pub const PT_WRITE_D: ::c_int = 5; +pub const PT_CONTINUE: ::c_int = 7; +pub const PT_KILL: ::c_int = 8; +pub const PT_ATTACH: ::c_int = 9; +pub const PT_DETACH: ::c_int = 10; +pub const PT_IO: ::c_int = 11; + // http://man.openbsd.org/OpenBSD-current/man2/clock_getres.2 // The man page says clock_gettime(3) can accept various values as clockid_t but // http://fxr.watson.org/fxr/source/kern/kern_time.c?v=OPENBSD;im=excerpts#L161 diff --git a/src/liblibc/src/unix/bsd/netbsdlike/netbsd/aarch64.rs b/src/liblibc/src/unix/bsd/netbsdlike/netbsd/aarch64.rs index 6aa9950ed1..fe05721923 100644 --- a/src/liblibc/src/unix/bsd/netbsdlike/netbsd/aarch64.rs +++ b/src/liblibc/src/unix/bsd/netbsdlike/netbsd/aarch64.rs @@ -1,3 +1,10 @@ +use PT_FIRSTMACH; + pub type c_long = i64; pub type c_ulong = u64; pub type c_char = u8; + +pub const PT_GETREGS: ::c_int = PT_FIRSTMACH + 0; +pub const PT_SETREGS: ::c_int = PT_FIRSTMACH + 1; +pub const PT_GETFPREGS: ::c_int = PT_FIRSTMACH + 2; +pub const PT_SETFPREGS: ::c_int = PT_FIRSTMACH + 3; diff --git a/src/liblibc/src/unix/bsd/netbsdlike/netbsd/arm.rs b/src/liblibc/src/unix/bsd/netbsdlike/netbsd/arm.rs index 377e05be07..d4e33a8a20 100644 --- a/src/liblibc/src/unix/bsd/netbsdlike/netbsd/arm.rs +++ b/src/liblibc/src/unix/bsd/netbsdlike/netbsd/arm.rs @@ -1,3 +1,10 @@ +use PT_FIRSTMACH; + pub type c_long = i32; pub type c_ulong = u32; pub type c_char = u8; + +pub const PT_GETREGS: ::c_int = PT_FIRSTMACH + 1; +pub const PT_SETREGS: ::c_int = PT_FIRSTMACH + 2; +pub const PT_GETFPREGS: ::c_int = PT_FIRSTMACH + 3; +pub const PT_SETFPREGS: ::c_int = PT_FIRSTMACH + 4; diff --git a/src/liblibc/src/unix/bsd/netbsdlike/netbsd/mod.rs b/src/liblibc/src/unix/bsd/netbsdlike/netbsd/mod.rs index b343c3a197..6703d0ab54 100644 --- a/src/liblibc/src/unix/bsd/netbsdlike/netbsd/mod.rs +++ b/src/liblibc/src/unix/bsd/netbsdlike/netbsd/mod.rs @@ -969,6 +969,15 @@ pub const SOCK_NONBLOCK: ::c_int = 0x20000000; pub const SIGSTKSZ : ::size_t = 40960; +pub const PT_DUMPCORE: ::c_int = 12; +pub const PT_LWPINFO: ::c_int = 13; +pub const PT_SYSCALL: ::c_int = 14; +pub const PT_SYSCALLEMU: ::c_int = 15; +pub const PT_SET_EVENT_MASK: ::c_int = 16; +pub const PT_GET_EVENT_MASK: ::c_int = 17; +pub const PT_GET_PROCESS_STATE: ::c_int = 18; +pub const PT_FIRSTMACH: ::c_int = 32; + // dirfd() is a macro on netbsd to access // the first field of the struct where dirp points to: // http://cvsweb.netbsd.org/bsdweb.cgi/src/include/dirent.h?rev=1.36 diff --git a/src/liblibc/src/unix/bsd/netbsdlike/netbsd/powerpc.rs b/src/liblibc/src/unix/bsd/netbsdlike/netbsd/powerpc.rs index 377e05be07..4937dd9a6b 100644 --- a/src/liblibc/src/unix/bsd/netbsdlike/netbsd/powerpc.rs +++ b/src/liblibc/src/unix/bsd/netbsdlike/netbsd/powerpc.rs @@ -1,3 +1,9 @@ +use PT_FIRSTMACH; + pub type c_long = i32; pub type c_ulong = u32; pub type c_char = u8; + +pub const PT_STEP: ::c_int = PT_FIRSTMACH + 0; +pub const PT_GETREGS: ::c_int = PT_FIRSTMACH + 1; +pub const PT_SETREGS: ::c_int = PT_FIRSTMACH + 2; 
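The `PT_*` request constants and `ptrace` declarations added across the BSD modules above are enough for basic process tracing through the `libc` crate. A FreeBSD-flavoured sketch of the attach/detach cycle is shown below; it is my own illustration rather than part of the patch, error handling is mostly elided, and the available request set differs between the BSDs.

```
extern crate libc;

use std::{env, ptr};

fn main() {
    let pid: libc::pid_t = env::args()
        .nth(1)
        .expect("usage: trace <pid>")
        .parse()
        .expect("pid must be numeric");

    unsafe {
        // Attach; the target is stopped once the attach completes.
        if libc::ptrace(libc::PT_ATTACH, pid, ptr::null_mut(), 0) == -1 {
            panic!("PT_ATTACH failed");
        }

        // Wait for the stop to be reported before issuing further requests.
        let mut status = 0;
        libc::waitpid(pid, &mut status, 0);

        // ... PT_GETREGS, PT_READ_D and friends could be issued here ...

        // Detach and let the target resume where it stopped: `(caddr_t)1`
        // means "continue from the current PC", `0` delivers no signal.
        libc::ptrace(libc::PT_DETACH, pid, 1 as *mut libc::c_char, 0);
    }
}
```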
diff --git a/src/liblibc/src/unix/bsd/netbsdlike/netbsd/x86_64.rs b/src/liblibc/src/unix/bsd/netbsdlike/netbsd/x86_64.rs index 27b9412668..3bc7f52471 100644 --- a/src/liblibc/src/unix/bsd/netbsdlike/netbsd/x86_64.rs +++ b/src/liblibc/src/unix/bsd/netbsdlike/netbsd/x86_64.rs @@ -1,3 +1,11 @@ +use PT_FIRSTMACH; + pub type c_long = i64; pub type c_ulong = u64; pub type c_char = i8; + +pub const PT_STEP: ::c_int = PT_FIRSTMACH + 0; +pub const PT_GETREGS: ::c_int = PT_FIRSTMACH + 1; +pub const PT_SETREGS: ::c_int = PT_FIRSTMACH + 2; +pub const PT_GETFPREGS: ::c_int = PT_FIRSTMACH + 3; +pub const PT_SETFPREGS: ::c_int = PT_FIRSTMACH + 4; diff --git a/src/liblibc/src/unix/bsd/netbsdlike/openbsdlike/bitrig/mod.rs b/src/liblibc/src/unix/bsd/netbsdlike/openbsdlike/bitrig/mod.rs index 557420485a..e5f0219e03 100644 --- a/src/liblibc/src/unix/bsd/netbsdlike/openbsdlike/bitrig/mod.rs +++ b/src/liblibc/src/unix/bsd/netbsdlike/openbsdlike/bitrig/mod.rs @@ -85,6 +85,8 @@ pub const IFF_MULTICAST: ::c_int = 0x8000; // supports multicast pub const SIGSTKSZ : ::size_t = 40960; +pub const PT_FIRSTMACH: ::c_int = 32; + extern { pub fn nl_langinfo_l(item: ::nl_item, locale: ::locale_t) -> *mut ::c_char; pub fn duplocale(base: ::locale_t) -> ::locale_t; diff --git a/src/liblibc/src/unix/bsd/netbsdlike/openbsdlike/bitrig/x86_64.rs b/src/liblibc/src/unix/bsd/netbsdlike/openbsdlike/bitrig/x86_64.rs index b07c476aa4..d3971aa35b 100644 --- a/src/liblibc/src/unix/bsd/netbsdlike/openbsdlike/bitrig/x86_64.rs +++ b/src/liblibc/src/unix/bsd/netbsdlike/openbsdlike/bitrig/x86_64.rs @@ -1,2 +1,10 @@ +use PT_FIRSTMACH; + pub type c_long = i64; pub type c_ulong = u64; + +pub const PT_STEP: ::c_int = PT_FIRSTMACH + 0; +pub const PT_GETREGS: ::c_int = PT_FIRSTMACH + 1; +pub const PT_SETREGS: ::c_int = PT_FIRSTMACH + 2; +pub const PT_GETFPREGS: ::c_int = PT_FIRSTMACH + 3; +pub const PT_SETFPREGS: ::c_int = PT_FIRSTMACH + 4; diff --git a/src/liblibc/src/unix/bsd/netbsdlike/openbsdlike/mod.rs b/src/liblibc/src/unix/bsd/netbsdlike/openbsdlike/mod.rs index bf5ddd2e7a..ab8e0c3d30 100644 --- a/src/liblibc/src/unix/bsd/netbsdlike/openbsdlike/mod.rs +++ b/src/liblibc/src/unix/bsd/netbsdlike/openbsdlike/mod.rs @@ -14,6 +14,7 @@ pub type pthread_cond_t = *mut ::c_void; pub type pthread_condattr_t = *mut ::c_void; pub type pthread_rwlock_t = *mut ::c_void; pub type pthread_rwlockattr_t = *mut ::c_void; +pub type caddr_t = *mut ::c_char; s! { pub struct dirent { @@ -695,6 +696,8 @@ pub const SOCK_CLOEXEC: ::c_int = 0x8000; pub const SOCK_NONBLOCK: ::c_int = 0x4000; pub const SOCK_DNS: ::c_int = 0x1000; +pub const PTRACE_FORK: ::c_int = 0x0002; + pub const WCONTINUED: ::c_int = 8; f! { @@ -734,6 +737,10 @@ extern { pub fn getentropy(buf: *mut ::c_void, buflen: ::size_t) -> ::c_int; pub fn setresgid(rgid: ::gid_t, egid: ::gid_t, sgid: ::gid_t) -> ::c_int; pub fn setresuid(ruid: ::uid_t, euid: ::uid_t, suid: ::uid_t) -> ::c_int; + pub fn ptrace(request: ::c_int, + pid: ::pid_t, + addr: caddr_t, + data: ::c_int) -> ::c_int; } cfg_if! 
{ diff --git a/src/liblibc/src/unix/bsd/netbsdlike/openbsdlike/openbsd/mod.rs b/src/liblibc/src/unix/bsd/netbsdlike/openbsdlike/openbsd/mod.rs index 5e6948115f..d862e7b55e 100644 --- a/src/liblibc/src/unix/bsd/netbsdlike/openbsdlike/openbsd/mod.rs +++ b/src/liblibc/src/unix/bsd/netbsdlike/openbsdlike/openbsd/mod.rs @@ -248,6 +248,8 @@ pub const IFF_MULTICAST: ::c_int = 0x8000; // supports multicast pub const SIGSTKSZ : ::size_t = 28672; +pub const PT_FIRSTMACH: ::c_int = 32; + extern { pub fn accept4(s: ::c_int, addr: *mut ::sockaddr, addrlen: *mut ::socklen_t, flags: ::c_int) -> ::c_int; diff --git a/src/liblibc/src/unix/bsd/netbsdlike/openbsdlike/openbsd/x86_64.rs b/src/liblibc/src/unix/bsd/netbsdlike/openbsdlike/openbsd/x86_64.rs index 27b9412668..3bc7f52471 100644 --- a/src/liblibc/src/unix/bsd/netbsdlike/openbsdlike/openbsd/x86_64.rs +++ b/src/liblibc/src/unix/bsd/netbsdlike/openbsdlike/openbsd/x86_64.rs @@ -1,3 +1,11 @@ +use PT_FIRSTMACH; + pub type c_long = i64; pub type c_ulong = u64; pub type c_char = i8; + +pub const PT_STEP: ::c_int = PT_FIRSTMACH + 0; +pub const PT_GETREGS: ::c_int = PT_FIRSTMACH + 1; +pub const PT_SETREGS: ::c_int = PT_FIRSTMACH + 2; +pub const PT_GETFPREGS: ::c_int = PT_FIRSTMACH + 3; +pub const PT_SETFPREGS: ::c_int = PT_FIRSTMACH + 4; diff --git a/src/liblibc/src/unix/mod.rs b/src/liblibc/src/unix/mod.rs index 9c68178ad1..06275a6994 100644 --- a/src/liblibc/src/unix/mod.rs +++ b/src/liblibc/src/unix/mod.rs @@ -641,6 +641,7 @@ extern { pub fn symlink(path1: *const c_char, path2: *const c_char) -> ::c_int; + pub fn truncate(path: *const c_char, length: off_t) -> ::c_int; pub fn ftruncate(fd: ::c_int, length: off_t) -> ::c_int; pub fn signal(signum: ::c_int, handler: sighandler_t) -> sighandler_t; diff --git a/src/liblibc/src/unix/notbsd/linux/mod.rs b/src/liblibc/src/unix/notbsd/linux/mod.rs index 6bc7db191c..1e1a66709c 100644 --- a/src/liblibc/src/unix/notbsd/linux/mod.rs +++ b/src/liblibc/src/unix/notbsd/linux/mod.rs @@ -33,6 +33,10 @@ pub type Elf64_Word = u32; pub type Elf64_Off = u64; pub type Elf64_Addr = u64; pub type Elf64_Xword = u64; +pub type Elf64_Sxword = i64; + +pub type Elf32_Section = u16; +pub type Elf64_Section = u16; pub enum fpos64_t {} // TODO: fill this out with a struct @@ -479,6 +483,58 @@ s! 
{ pub dlpi_tls_data: *mut ::c_void, } + pub struct Elf32_Ehdr { + pub e_ident: [::c_uchar; 16], + pub e_type: Elf32_Half, + pub e_machine: Elf32_Half, + pub e_version: Elf32_Word, + pub e_entry: Elf32_Addr, + pub e_phoff: Elf32_Off, + pub e_shoff: Elf32_Off, + pub e_flags: Elf32_Word, + pub e_ehsize: Elf32_Half, + pub e_phentsize: Elf32_Half, + pub e_phnum: Elf32_Half, + pub e_shentsize: Elf32_Half, + pub e_shnum: Elf32_Half, + pub e_shstrndx: Elf32_Half, + } + + pub struct Elf64_Ehdr { + pub e_ident: [::c_uchar; 16], + pub e_type: Elf64_Half, + pub e_machine: Elf64_Half, + pub e_version: Elf64_Word, + pub e_entry: Elf64_Addr, + pub e_phoff: Elf64_Off, + pub e_shoff: Elf64_Off, + pub e_flags: Elf64_Word, + pub e_ehsize: Elf64_Half, + pub e_phentsize: Elf64_Half, + pub e_phnum: Elf64_Half, + pub e_shentsize: Elf64_Half, + pub e_shnum: Elf64_Half, + pub e_shstrndx: Elf64_Half, + } + + pub struct Elf32_Sym { + pub st_name: Elf32_Word, + pub st_value: Elf32_Addr, + pub st_size: Elf32_Word, + pub st_info: ::c_uchar, + pub st_other: ::c_uchar, + pub st_shndx: Elf32_Section, + } + + pub struct Elf64_Sym { + pub st_name: Elf64_Word, + pub st_info: ::c_uchar, + pub st_other: ::c_uchar, + pub st_shndx: Elf64_Section, + pub st_value: Elf64_Addr, + pub st_size: Elf64_Xword, + } + pub struct Elf32_Phdr { pub p_type: Elf32_Word, pub p_offset: Elf32_Off, @@ -501,6 +557,45 @@ s! { pub p_align: Elf64_Xword, } + pub struct Elf32_Shdr { + pub sh_name: Elf32_Word, + pub sh_type: Elf32_Word, + pub sh_flags: Elf32_Word, + pub sh_addr: Elf32_Addr, + pub sh_offset: Elf32_Off, + pub sh_size: Elf32_Word, + pub sh_link: Elf32_Word, + pub sh_info: Elf32_Word, + pub sh_addralign: Elf32_Word, + pub sh_entsize: Elf32_Word, + } + + pub struct Elf64_Shdr { + pub sh_name: Elf64_Word, + pub sh_type: Elf64_Word, + pub sh_flags: Elf64_Xword, + pub sh_addr: Elf64_Addr, + pub sh_offset: Elf64_Off, + pub sh_size: Elf64_Xword, + pub sh_link: Elf64_Word, + pub sh_info: Elf64_Word, + pub sh_addralign: Elf64_Xword, + pub sh_entsize: Elf64_Xword, + } + + pub struct Elf32_Chdr { + pub ch_type: Elf32_Word, + pub ch_size: Elf32_Word, + pub ch_addralign: Elf32_Word, + } + + pub struct Elf64_Chdr { + pub ch_type: Elf64_Word, + pub ch_reserved: Elf64_Word, + pub ch_size: Elf64_Xword, + pub ch_addralign: Elf64_Xword, + } + pub struct ucred { pub pid: ::pid_t, pub uid: ::uid_t, @@ -1721,8 +1816,12 @@ extern { pub fn ftello64(stream: *mut ::FILE) -> ::off64_t; pub fn fallocate(fd: ::c_int, mode: ::c_int, offset: ::off_t, len: ::off_t) -> ::c_int; + pub fn fallocate64(fd: ::c_int, mode: ::c_int, + offset: ::off64_t, len: ::off64_t) -> ::c_int; pub fn posix_fallocate(fd: ::c_int, offset: ::off_t, len: ::off_t) -> ::c_int; + pub fn posix_fallocate64(fd: ::c_int, offset: ::off64_t, + len: ::off64_t) -> ::c_int; pub fn readahead(fd: ::c_int, offset: ::off64_t, count: ::size_t) -> ::ssize_t; pub fn getxattr(path: *const c_char, name: *const c_char, diff --git a/src/liblibc/src/unix/notbsd/linux/other/mod.rs b/src/liblibc/src/unix/notbsd/linux/other/mod.rs index 93b710b8df..8e32edf7a7 100644 --- a/src/liblibc/src/unix/notbsd/linux/other/mod.rs +++ b/src/liblibc/src/unix/notbsd/linux/other/mod.rs @@ -824,6 +824,19 @@ pub const NFT_TRACETYPE_RULE: ::c_int = 3; pub const NFT_NG_INCREMENTAL: ::c_int = 0; pub const NFT_NG_RANDOM: ::c_int = 1; +pub const M_MXFAST: ::c_int = 1; +pub const M_NLBLKS: ::c_int = 2; +pub const M_GRAIN: ::c_int = 3; +pub const M_KEEP: ::c_int = 4; +pub const M_TRIM_THRESHOLD: ::c_int = -1; +pub const M_TOP_PAD: ::c_int = -2; 
+pub const M_MMAP_THRESHOLD: ::c_int = -3; +pub const M_MMAP_MAX: ::c_int = -4; +pub const M_CHECK_ACTION: ::c_int = -5; +pub const M_PERTURB: ::c_int = -6; +pub const M_ARENA_TEST: ::c_int = -7; +pub const M_ARENA_MAX: ::c_int = -8; + #[doc(hidden)] pub const AF_MAX: ::c_int = 42; #[doc(hidden)] @@ -856,6 +869,7 @@ extern { pub fn setutxent(); pub fn endutxent(); pub fn getpt() -> ::c_int; + pub fn mallopt(param: ::c_int, value: ::c_int) -> ::c_int; } #[link(name = "util")] diff --git a/src/liblibc/src/unix/uclibc/mips/mod.rs b/src/liblibc/src/unix/uclibc/mips/mod.rs index d197249d05..27f6fe5846 100644 --- a/src/liblibc/src/unix/uclibc/mips/mod.rs +++ b/src/liblibc/src/unix/uclibc/mips/mod.rs @@ -226,8 +226,8 @@ pub const SO_BPF_EXTENSIONS: ::c_int = 48; pub const FIOCLEX: ::c_ulong = 0x6601; pub const FIONBIO: ::c_ulong = 0x667e; -pub const SA_ONSTACK: ::c_int = 0x08000000; -pub const SA_SIGINFO: ::c_int = 0x00000008; +pub const SA_ONSTACK: ::c_uint = 0x08000000; +pub const SA_SIGINFO: ::c_uint = 0x00000008; pub const SA_NOCLDWAIT: ::c_int = 0x00010000; pub const SIGCHLD: ::c_int = 18; diff --git a/src/liblibc/src/unix/uclibc/mod.rs b/src/liblibc/src/unix/uclibc/mod.rs index e3606c2266..5a947fddd6 100644 --- a/src/liblibc/src/unix/uclibc/mod.rs +++ b/src/liblibc/src/unix/uclibc/mod.rs @@ -1634,6 +1634,8 @@ extern { pub fn uselocale(loc: ::locale_t) -> ::locale_t; pub fn creat64(path: *const c_char, mode: mode_t) -> ::c_int; pub fn fstat64(fildes: ::c_int, buf: *mut stat64) -> ::c_int; + pub fn fstatat64(fildes: ::c_int, path: *const ::c_char, + buf: *mut stat64, flag: ::c_int) -> ::c_int; pub fn ftruncate64(fd: ::c_int, length: off64_t) -> ::c_int; pub fn getrlimit64(resource: ::c_int, rlim: *mut rlimit64) -> ::c_int; pub fn lseek64(fd: ::c_int, offset: off64_t, whence: ::c_int) -> off64_t; @@ -1853,6 +1855,8 @@ extern { pub fn seekdir(dirp: *mut ::DIR, loc: ::c_long); + pub fn dirfd(dirp: *mut ::DIR) -> ::c_int; + pub fn telldir(dirp: *mut ::DIR) -> ::c_long; pub fn madvise(addr: *mut ::c_void, len: ::size_t, advice: ::c_int) -> ::c_int; diff --git a/src/libpanic_abort/lib.rs b/src/libpanic_abort/lib.rs index b86928534c..9235f8e766 100644 --- a/src/libpanic_abort/lib.rs +++ b/src/libpanic_abort/lib.rs @@ -97,7 +97,10 @@ pub unsafe extern fn __rust_start_panic(_payload: usize) -> u32 { pub mod personalities { #[no_mangle] #[cfg(not(any( - target_arch = "wasm32", + all( + target_arch = "wasm32", + not(target_os = "emscripten"), + ), all( target_os = "windows", target_env = "gnu", diff --git a/src/libpanic_unwind/dwarf/mod.rs b/src/libpanic_unwind/dwarf/mod.rs index 7e0c32fe03..3ff250ff65 100644 --- a/src/libpanic_unwind/dwarf/mod.rs +++ b/src/libpanic_unwind/dwarf/mod.rs @@ -29,7 +29,7 @@ struct Unaligned(T); impl DwarfReader { pub fn new(ptr: *const u8) -> DwarfReader { - DwarfReader { ptr: ptr } + DwarfReader { ptr } } // DWARF streams are packed, so e.g. 
a u32 would not necessarily be aligned diff --git a/src/libpanic_unwind/seh64_gnu.rs b/src/libpanic_unwind/seh64_gnu.rs index c2074db003..60e9829ef9 100644 --- a/src/libpanic_unwind/seh64_gnu.rs +++ b/src/libpanic_unwind/seh64_gnu.rs @@ -41,7 +41,7 @@ struct PanicData { } pub unsafe fn panic(data: Box) -> u32 { - let panic_ctx = Box::new(PanicData { data: data }); + let panic_ctx = Box::new(PanicData { data }); let params = [Box::into_raw(panic_ctx) as c::ULONG_PTR]; c::RaiseException(RUST_PANIC, c::EXCEPTION_NONCONTINUABLE, diff --git a/src/libproc_macro/Cargo.toml b/src/libproc_macro/Cargo.toml index c1b2622520..f903f79f9a 100644 --- a/src/libproc_macro/Cargo.toml +++ b/src/libproc_macro/Cargo.toml @@ -5,10 +5,3 @@ version = "0.0.0" [lib] path = "lib.rs" -crate-type = ["dylib"] - -[dependencies] -syntax = { path = "../libsyntax" } -syntax_pos = { path = "../libsyntax_pos" } -rustc_errors = { path = "../librustc_errors" } -rustc_data_structures = { path = "../librustc_data_structures" } diff --git a/src/libproc_macro/bridge/buffer.rs b/src/libproc_macro/bridge/buffer.rs new file mode 100644 index 0000000000..f228841c1e --- /dev/null +++ b/src/libproc_macro/bridge/buffer.rs @@ -0,0 +1,170 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Buffer management for same-process client<->server communication. + +use std::io::{self, Write}; +use std::mem; +use std::ops::{Deref, DerefMut}; +use std::slice; + +#[repr(C)] +struct Slice<'a, T: 'a> { + data: &'a [T; 0], + len: usize, +} + +unsafe impl<'a, T: Sync> Sync for Slice<'a, T> {} +unsafe impl<'a, T: Sync> Send for Slice<'a, T> {} + +impl Copy for Slice<'a, T> {} +impl Clone for Slice<'a, T> { + fn clone(&self) -> Self { + *self + } +} + +impl From<&'a [T]> for Slice<'a, T> { + fn from(xs: &'a [T]) -> Self { + Slice { + data: unsafe { &*(xs.as_ptr() as *const [T; 0]) }, + len: xs.len(), + } + } +} + +impl Deref for Slice<'a, T> { + type Target = [T]; + fn deref(&self) -> &[T] { + unsafe { slice::from_raw_parts(self.data.as_ptr(), self.len) } + } +} + +#[repr(C)] +pub struct Buffer { + data: *mut T, + len: usize, + capacity: usize, + extend_from_slice: extern "C" fn(Buffer, Slice) -> Buffer, + drop: extern "C" fn(Buffer), +} + +unsafe impl Sync for Buffer {} +unsafe impl Send for Buffer {} + +impl Default for Buffer { + fn default() -> Self { + Self::from(vec![]) + } +} + +impl Deref for Buffer { + type Target = [T]; + fn deref(&self) -> &[T] { + unsafe { slice::from_raw_parts(self.data as *const T, self.len) } + } +} + +impl DerefMut for Buffer { + fn deref_mut(&mut self) -> &mut [T] { + unsafe { slice::from_raw_parts_mut(self.data, self.len) } + } +} + +impl Buffer { + pub(super) fn new() -> Self { + Self::default() + } + + pub(super) fn clear(&mut self) { + self.len = 0; + } + + pub(super) fn take(&mut self) -> Self { + mem::replace(self, Self::default()) + } + + pub(super) fn extend_from_slice(&mut self, xs: &[T]) { + // Fast path to avoid going through an FFI call. 
+ if let Some(final_len) = self.len.checked_add(xs.len()) { + if final_len <= self.capacity { + let dst = unsafe { slice::from_raw_parts_mut(self.data, self.capacity) }; + dst[self.len..][..xs.len()].copy_from_slice(xs); + self.len = final_len; + return; + } + } + let b = self.take(); + *self = (b.extend_from_slice)(b, Slice::from(xs)); + } +} + +impl Write for Buffer { + fn write(&mut self, xs: &[u8]) -> io::Result { + self.extend_from_slice(xs); + Ok(xs.len()) + } + + fn write_all(&mut self, xs: &[u8]) -> io::Result<()> { + self.extend_from_slice(xs); + Ok(()) + } + + fn flush(&mut self) -> io::Result<()> { + Ok(()) + } +} + +impl Drop for Buffer { + fn drop(&mut self) { + let b = self.take(); + (b.drop)(b); + } +} + +impl From> for Buffer { + fn from(mut v: Vec) -> Self { + let (data, len, capacity) = (v.as_mut_ptr(), v.len(), v.capacity()); + mem::forget(v); + + // This utility function is nested in here because it can *only* + // be safely called on `Buffer`s created by *this* `proc_macro`. + fn to_vec(b: Buffer) -> Vec { + unsafe { + let Buffer { + data, + len, + capacity, + .. + } = b; + mem::forget(b); + Vec::from_raw_parts(data, len, capacity) + } + } + + extern "C" fn extend_from_slice(b: Buffer, xs: Slice) -> Buffer { + let mut v = to_vec(b); + v.extend_from_slice(&xs); + Buffer::from(v) + } + + extern "C" fn drop(b: Buffer) { + mem::drop(to_vec(b)); + } + + Buffer { + data, + len, + capacity, + extend_from_slice, + drop, + } + } +} diff --git a/src/libproc_macro/bridge/client.rs b/src/libproc_macro/bridge/client.rs new file mode 100644 index 0000000000..ed27df4496 --- /dev/null +++ b/src/libproc_macro/bridge/client.rs @@ -0,0 +1,504 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Client-side types. + +use super::*; + +macro_rules! define_handles { + ( + 'owned: $($oty:ident,)* + 'interned: $($ity:ident,)* + ) => { + #[repr(C)] + #[allow(non_snake_case)] + pub struct HandleCounters { + $($oty: AtomicUsize,)* + $($ity: AtomicUsize,)* + } + + impl HandleCounters { + // FIXME(#53451) public to work around `Cannot create local mono-item` ICE. + pub extern "C" fn get() -> &'static Self { + static COUNTERS: HandleCounters = HandleCounters { + $($oty: AtomicUsize::new(1),)* + $($ity: AtomicUsize::new(1),)* + }; + &COUNTERS + } + } + + // FIXME(eddyb) generate the definition of `HandleStore` in `server.rs`. + #[repr(C)] + #[allow(non_snake_case)] + pub(super) struct HandleStore { + $($oty: handle::OwnedStore,)* + $($ity: handle::InternedStore,)* + } + + impl HandleStore { + pub(super) fn new(handle_counters: &'static HandleCounters) -> Self { + HandleStore { + $($oty: handle::OwnedStore::new(&handle_counters.$oty),)* + $($ity: handle::InternedStore::new(&handle_counters.$ity),)* + } + } + } + + $( + #[repr(C)] + pub(crate) struct $oty(handle::Handle); + impl !Send for $oty {} + impl !Sync for $oty {} + + // Forward `Drop::drop` to the inherent `drop` method. 
+ impl Drop for $oty { + fn drop(&mut self) { + $oty(self.0).drop(); + } + } + + impl Encode for $oty { + fn encode(self, w: &mut Writer, s: &mut S) { + let handle = self.0; + mem::forget(self); + handle.encode(w, s); + } + } + + impl DecodeMut<'_, '_, HandleStore>> + for Marked + { + fn decode(r: &mut Reader, s: &mut HandleStore>) -> Self { + s.$oty.take(handle::Handle::decode(r, &mut ())) + } + } + + impl Encode for &$oty { + fn encode(self, w: &mut Writer, s: &mut S) { + self.0.encode(w, s); + } + } + + impl Decode<'_, 's, HandleStore>> + for &'s Marked + { + fn decode(r: &mut Reader, s: &'s HandleStore>) -> Self { + &s.$oty[handle::Handle::decode(r, &mut ())] + } + } + + impl Encode for &mut $oty { + fn encode(self, w: &mut Writer, s: &mut S) { + self.0.encode(w, s); + } + } + + impl DecodeMut<'_, 's, HandleStore>> + for &'s mut Marked + { + fn decode(r: &mut Reader, s: &'s mut HandleStore>) -> Self { + &mut s.$oty[handle::Handle::decode(r, &mut ())] + } + } + + impl Encode>> + for Marked + { + fn encode(self, w: &mut Writer, s: &mut HandleStore>) { + s.$oty.alloc(self).encode(w, s); + } + } + + impl DecodeMut<'_, '_, S> for $oty { + fn decode(r: &mut Reader, s: &mut S) -> Self { + $oty(handle::Handle::decode(r, s)) + } + } + )* + + $( + #[repr(C)] + #[derive(Copy, Clone, PartialEq, Eq, Hash)] + pub(crate) struct $ity(handle::Handle); + impl !Send for $ity {} + impl !Sync for $ity {} + + impl Encode for $ity { + fn encode(self, w: &mut Writer, s: &mut S) { + self.0.encode(w, s); + } + } + + impl DecodeMut<'_, '_, HandleStore>> + for Marked + { + fn decode(r: &mut Reader, s: &mut HandleStore>) -> Self { + s.$ity.copy(handle::Handle::decode(r, &mut ())) + } + } + + impl Encode>> + for Marked + { + fn encode(self, w: &mut Writer, s: &mut HandleStore>) { + s.$ity.alloc(self).encode(w, s); + } + } + + impl DecodeMut<'_, '_, S> for $ity { + fn decode(r: &mut Reader, s: &mut S) -> Self { + $ity(handle::Handle::decode(r, s)) + } + } + )* + } +} +define_handles! { + 'owned: + TokenStream, + TokenStreamBuilder, + TokenStreamIter, + Group, + Literal, + SourceFile, + MultiSpan, + Diagnostic, + + 'interned: + Punct, + Ident, + Span, +} + +// FIXME(eddyb) generate these impls by pattern-matching on the +// names of methods - also could use the presence of `fn drop` +// to distinguish between 'owned and 'interned, above. +// Alternatively, special 'modes" could be listed of types in with_api +// instead of pattern matching on methods, here and in server decl. + +impl Clone for TokenStream { + fn clone(&self) -> Self { + self.clone() + } +} + +impl Clone for TokenStreamIter { + fn clone(&self) -> Self { + self.clone() + } +} + +impl Clone for Group { + fn clone(&self) -> Self { + self.clone() + } +} + +impl Clone for Literal { + fn clone(&self) -> Self { + self.clone() + } +} + +// FIXME(eddyb) `Literal` should not expose internal `Debug` impls. +impl fmt::Debug for Literal { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str(&self.debug()) + } +} + +impl Clone for SourceFile { + fn clone(&self) -> Self { + self.clone() + } +} + +impl fmt::Debug for Span { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str(&self.debug()) + } +} + +macro_rules! 
define_client_side { + ($($name:ident { + $(fn $method:ident($($arg:ident: $arg_ty:ty),* $(,)*) $(-> $ret_ty:ty)*;)* + }),* $(,)*) => { + $(impl $name { + $(pub(crate) fn $method($($arg: $arg_ty),*) $(-> $ret_ty)* { + Bridge::with(|bridge| { + let mut b = bridge.cached_buffer.take(); + + b.clear(); + api_tags::Method::$name(api_tags::$name::$method).encode(&mut b, &mut ()); + reverse_encode!(b; $($arg),*); + + b = bridge.dispatch.call(b); + + let r = Result::<_, PanicMessage>::decode(&mut &b[..], &mut ()); + + bridge.cached_buffer = b; + + r.unwrap_or_else(|e| panic::resume_unwind(e.into())) + }) + })* + })* + } +} +with_api!(self, self, define_client_side); + +enum BridgeState<'a> { + /// No server is currently connected to this client. + NotConnected, + + /// A server is connected and available for requests. + Connected(Bridge<'a>), + + /// Access to the bridge is being exclusively acquired + /// (e.g. during `BridgeState::with`). + InUse, +} + +enum BridgeStateL {} + +impl<'a> scoped_cell::ApplyL<'a> for BridgeStateL { + type Out = BridgeState<'a>; +} + +thread_local! { + static BRIDGE_STATE: scoped_cell::ScopedCell = + scoped_cell::ScopedCell::new(BridgeState::NotConnected); +} + +impl BridgeState<'_> { + /// Take exclusive control of the thread-local + /// `BridgeState`, and pass it to `f`, mutably. + /// The state will be restored after `f` exits, even + /// by panic, including modifications made to it by `f`. + /// + /// NB: while `f` is running, the thread-local state + /// is `BridgeState::InUse`. + fn with(f: impl FnOnce(&mut BridgeState) -> R) -> R { + BRIDGE_STATE.with(|state| { + state.replace(BridgeState::InUse, |mut state| { + // FIXME(#52812) pass `f` directly to `replace` when `RefMutL` is gone + f(&mut *state) + }) + }) + } +} + +impl Bridge<'_> { + fn enter(self, f: impl FnOnce() -> R) -> R { + // Hide the default panic output within `proc_macro` expansions. + // NB. the server can't do this because it may use a different libstd. + static HIDE_PANICS_DURING_EXPANSION: Once = Once::new(); + HIDE_PANICS_DURING_EXPANSION.call_once(|| { + let prev = panic::take_hook(); + panic::set_hook(Box::new(move |info| { + let hide = BridgeState::with(|state| match state { + BridgeState::NotConnected => false, + BridgeState::Connected(_) | BridgeState::InUse => true, + }); + if !hide { + prev(info) + } + })); + }); + + BRIDGE_STATE.with(|state| state.set(BridgeState::Connected(self), f)) + } + + fn with(f: impl FnOnce(&mut Bridge) -> R) -> R { + BridgeState::with(|state| match state { + BridgeState::NotConnected => { + panic!("procedural macro API is used outside of a procedural macro"); + } + BridgeState::InUse => { + panic!("procedural macro API is used while it's already in use"); + } + BridgeState::Connected(bridge) => f(bridge), + }) + } +} + +/// A client-side "global object" (usually a function pointer), +/// which may be using a different `proc_macro` from the one +/// used by the server, but can be interacted with compatibly. +/// +/// NB: `F` must have FFI-friendly memory layout (e.g. a pointer). +/// The call ABI of function pointers used for `F` doesn't +/// need to match between server and client, since it's only +/// passed between them and (eventually) called by the client. 
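Before the `Client` definition that follows, the shape is easier to see stripped of the bridge machinery. The sketch below uses a hypothetical `MiniClient` type of my own, not anything in this patch: a `#[repr(C)]` struct that carries an `extern "C"` trampoline next to an FFI-friendly payload, so the other side only ever calls through the C ABI and never touches the client's Rust-ABI function directly.

```
#[repr(C)]
#[derive(Copy, Clone)]
struct MiniClient {
    run: extern "C" fn(fn(u32) -> u32, u32) -> u32,
    f: fn(u32) -> u32,
}

extern "C" fn run_adapter(f: fn(u32) -> u32, input: u32) -> u32 {
    // In the real bridge this is where panics are caught and arguments are
    // encoded/decoded; here it simply forwards the call.
    f(input)
}

impl MiniClient {
    const fn new(f: fn(u32) -> u32) -> Self {
        MiniClient { run: run_adapter, f }
    }
}

fn main() {
    // "Server" side: it only sees a C-ABI entry point plus an opaque payload.
    let client = MiniClient::new(|x| x + 1);
    assert_eq!((client.run)(client.f, 41), 42);
}
```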
+#[repr(C)] +#[derive(Copy, Clone)] +pub struct Client { + pub(super) get_handle_counters: extern "C" fn() -> &'static HandleCounters, + pub(super) run: extern "C" fn(Bridge, F) -> Buffer, + pub(super) f: F, +} + +// FIXME(#53451) public to work around `Cannot create local mono-item` ICE, +// affecting not only the function itself, but also the `BridgeState` `thread_local!`. +pub extern "C" fn __run_expand1( + mut bridge: Bridge, + f: fn(::TokenStream) -> ::TokenStream, +) -> Buffer { + // The initial `cached_buffer` contains the input. + let mut b = bridge.cached_buffer.take(); + + panic::catch_unwind(panic::AssertUnwindSafe(|| { + bridge.enter(|| { + let reader = &mut &b[..]; + let input = TokenStream::decode(reader, &mut ()); + + // Put the `cached_buffer` back in the `Bridge`, for requests. + Bridge::with(|bridge| bridge.cached_buffer = b.take()); + + let output = f(::TokenStream(input)).0; + + // Take the `cached_buffer` back out, for the output value. + b = Bridge::with(|bridge| bridge.cached_buffer.take()); + + // HACK(eddyb) Separate encoding a success value (`Ok(output)`) + // from encoding a panic (`Err(e: PanicMessage)`) to avoid + // having handles outside the `bridge.enter(|| ...)` scope, and + // to catch panics that could happen while encoding the success. + // + // Note that panics should be impossible beyond this point, but + // this is defensively trying to avoid any accidental panicking + // reaching the `extern "C"` (which should `abort` but may not + // at the moment, so this is also potentially preventing UB). + b.clear(); + Ok::<_, ()>(output).encode(&mut b, &mut ()); + }) + })) + .map_err(PanicMessage::from) + .unwrap_or_else(|e| { + b.clear(); + Err::<(), _>(e).encode(&mut b, &mut ()); + }); + b +} + +impl Client ::TokenStream> { + pub const fn expand1(f: fn(::TokenStream) -> ::TokenStream) -> Self { + Client { + get_handle_counters: HandleCounters::get, + run: __run_expand1, + f, + } + } +} + +// FIXME(#53451) public to work around `Cannot create local mono-item` ICE, +// affecting not only the function itself, but also the `BridgeState` `thread_local!`. +pub extern "C" fn __run_expand2( + mut bridge: Bridge, + f: fn(::TokenStream, ::TokenStream) -> ::TokenStream, +) -> Buffer { + // The initial `cached_buffer` contains the input. + let mut b = bridge.cached_buffer.take(); + + panic::catch_unwind(panic::AssertUnwindSafe(|| { + bridge.enter(|| { + let reader = &mut &b[..]; + let input = TokenStream::decode(reader, &mut ()); + let input2 = TokenStream::decode(reader, &mut ()); + + // Put the `cached_buffer` back in the `Bridge`, for requests. + Bridge::with(|bridge| bridge.cached_buffer = b.take()); + + let output = f(::TokenStream(input), ::TokenStream(input2)).0; + + // Take the `cached_buffer` back out, for the output value. + b = Bridge::with(|bridge| bridge.cached_buffer.take()); + + // HACK(eddyb) Separate encoding a success value (`Ok(output)`) + // from encoding a panic (`Err(e: PanicMessage)`) to avoid + // having handles outside the `bridge.enter(|| ...)` scope, and + // to catch panics that could happen while encoding the success. + // + // Note that panics should be impossible beyond this point, but + // this is defensively trying to avoid any accidental panicking + // reaching the `extern "C"` (which should `abort` but may not + // at the moment, so this is also potentially preventing UB). 
+ b.clear(); + Ok::<_, ()>(output).encode(&mut b, &mut ()); + }) + })) + .map_err(PanicMessage::from) + .unwrap_or_else(|e| { + b.clear(); + Err::<(), _>(e).encode(&mut b, &mut ()); + }); + b +} + +impl Client ::TokenStream> { + pub const fn expand2(f: fn(::TokenStream, ::TokenStream) -> ::TokenStream) -> Self { + Client { + get_handle_counters: HandleCounters::get, + run: __run_expand2, + f, + } + } +} + +#[repr(C)] +#[derive(Copy, Clone)] +pub enum ProcMacro { + CustomDerive { + trait_name: &'static str, + attributes: &'static [&'static str], + client: Client ::TokenStream>, + }, + + Attr { + name: &'static str, + client: Client ::TokenStream>, + }, + + Bang { + name: &'static str, + client: Client ::TokenStream>, + }, +} + +impl ProcMacro { + pub const fn custom_derive( + trait_name: &'static str, + attributes: &'static [&'static str], + expand: fn(::TokenStream) -> ::TokenStream, + ) -> Self { + ProcMacro::CustomDerive { + trait_name, + attributes, + client: Client::expand1(expand), + } + } + + pub const fn attr( + name: &'static str, + expand: fn(::TokenStream, ::TokenStream) -> ::TokenStream, + ) -> Self { + ProcMacro::Attr { + name, + client: Client::expand2(expand), + } + } + + pub const fn bang(name: &'static str, expand: fn(::TokenStream) -> ::TokenStream) -> Self { + ProcMacro::Bang { + name, + client: Client::expand1(expand), + } + } +} diff --git a/src/libproc_macro/bridge/closure.rs b/src/libproc_macro/bridge/closure.rs new file mode 100644 index 0000000000..92fe7baae0 --- /dev/null +++ b/src/libproc_macro/bridge/closure.rs @@ -0,0 +1,42 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Closure type (equivalent to `&mut dyn FnMut(A) -> R`) that's `repr(C)`. + +#[repr(C)] +pub struct Closure<'a, A, R> { + call: unsafe extern "C" fn(&mut Env, A) -> R, + env: &'a mut Env, +} + +extern "C" { + type Env; +} + +impl<'a, A, R> !Sync for Closure<'a, A, R> {} +impl<'a, A, R> !Send for Closure<'a, A, R> {} + +impl<'a, A, R, F: FnMut(A) -> R> From<&'a mut F> for Closure<'a, A, R> { + fn from(f: &'a mut F) -> Self { + unsafe extern "C" fn call R>(env: &mut Env, arg: A) -> R { + (*(env as *mut _ as *mut F))(arg) + } + Closure { + call: call::, + env: unsafe { &mut *(f as *mut _ as *mut Env) }, + } + } +} + +impl<'a, A, R> Closure<'a, A, R> { + pub fn call(&mut self, arg: A) -> R { + unsafe { (self.call)(self.env, arg) } + } +} diff --git a/src/libproc_macro/bridge/handle.rs b/src/libproc_macro/bridge/handle.rs new file mode 100644 index 0000000000..5c91a1408a --- /dev/null +++ b/src/libproc_macro/bridge/handle.rs @@ -0,0 +1,92 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Server-side handles and storage for per-handle data. 
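[Editor's note] The `bridge/closure.rs` file added above is short but dense. As a hedged illustration of the same idea (a type-erased `FnMut` behind an `extern "C"` function pointer with `#[repr(C)]` layout), here is a sketch that uses a raw pointer plus `PhantomData` for the environment instead of the `extern type` trick; the names are illustrative, not the real API.

```rust
use std::marker::PhantomData;

// Illustrative FFI-safe callback: a C-compatible function pointer plus a
// type-erased pointer to the closure's environment, tied to lifetime 'a.
#[repr(C)]
struct RawClosure<'a, A, R> {
    call: unsafe extern "C" fn(*mut (), A) -> R,
    env: *mut (),
    _marker: PhantomData<&'a mut ()>,
}

impl<'a, A, R, F: FnMut(A) -> R> From<&'a mut F> for RawClosure<'a, A, R> {
    fn from(f: &'a mut F) -> Self {
        unsafe extern "C" fn call<A, R, F: FnMut(A) -> R>(env: *mut (), arg: A) -> R {
            // Safety: `env` was produced from an exclusive `&mut F` below.
            (*(env as *mut F))(arg)
        }
        RawClosure {
            call: call::<A, R, F>,
            env: f as *mut F as *mut (),
            _marker: PhantomData,
        }
    }
}

impl<'a, A, R> RawClosure<'a, A, R> {
    fn call(&mut self, arg: A) -> R {
        unsafe { (self.call)(self.env, arg) }
    }
}

fn main() {
    let mut total = 0u32;
    let mut add = |n: u32| { total += n; total };
    let mut cb = RawClosure::from(&mut add);
    assert_eq!(cb.call(2), 2);
    assert_eq!(cb.call(3), 5);
}
```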
+ +use std::collections::{BTreeMap, HashMap}; +use std::hash::Hash; +use std::num::NonZeroU32; +use std::ops::{Index, IndexMut}; +use std::sync::atomic::{AtomicUsize, Ordering}; + +pub(super) type Handle = NonZeroU32; + +pub(super) struct OwnedStore { + counter: &'static AtomicUsize, + data: BTreeMap, +} + +impl OwnedStore { + pub(super) fn new(counter: &'static AtomicUsize) -> Self { + // Ensure the handle counter isn't 0, which would panic later, + // when `NonZeroU32::new` (aka `Handle::new`) is called in `alloc`. + assert_ne!(counter.load(Ordering::SeqCst), 0); + + OwnedStore { + counter, + data: BTreeMap::new(), + } + } +} + +impl OwnedStore { + pub(super) fn alloc(&mut self, x: T) -> Handle { + let counter = self.counter.fetch_add(1, Ordering::SeqCst); + let handle = Handle::new(counter as u32).expect("`proc_macro` handle counter overflowed"); + assert!(self.data.insert(handle, x).is_none()); + handle + } + + pub(super) fn take(&mut self, h: Handle) -> T { + self.data + .remove(&h) + .expect("use-after-free in `proc_macro` handle") + } +} + +impl Index for OwnedStore { + type Output = T; + fn index(&self, h: Handle) -> &T { + self.data + .get(&h) + .expect("use-after-free in `proc_macro` handle") + } +} + +impl IndexMut for OwnedStore { + fn index_mut(&mut self, h: Handle) -> &mut T { + self.data + .get_mut(&h) + .expect("use-after-free in `proc_macro` handle") + } +} + +pub(super) struct InternedStore { + owned: OwnedStore, + interner: HashMap, +} + +impl InternedStore { + pub(super) fn new(counter: &'static AtomicUsize) -> Self { + InternedStore { + owned: OwnedStore::new(counter), + interner: HashMap::new(), + } + } + + pub(super) fn alloc(&mut self, x: T) -> Handle { + let owned = &mut self.owned; + *self.interner.entry(x).or_insert_with(|| owned.alloc(x)) + } + + pub(super) fn copy(&mut self, h: Handle) -> T { + self.owned[h] + } +} diff --git a/src/libproc_macro/bridge/mod.rs b/src/libproc_macro/bridge/mod.rs new file mode 100644 index 0000000000..f03c63fc04 --- /dev/null +++ b/src/libproc_macro/bridge/mod.rs @@ -0,0 +1,413 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Internal interface for communicating between a `proc_macro` client +//! (a proc macro crate) and a `proc_macro` server (a compiler front-end). +//! +//! Serialization (with C ABI buffers) and unique integer handles are employed +//! to allow safely interfacing between two copies of `proc_macro` built +//! (from the same source) by different compilers with potentially mismatching +//! Rust ABIs (e.g. stage0/bin/rustc vs stage1/bin/rustc during bootstrap). + +#![deny(unsafe_code)] + +use std::fmt; +use std::hash::Hash; +use std::marker; +use std::mem; +use std::ops::Bound; +use std::panic; +use std::sync::atomic::AtomicUsize; +use std::sync::Once; +use std::thread; +use {Delimiter, Level, LineColumn, Spacing}; + +/// Higher-order macro describing the server RPC API, allowing automatic +/// generation of type-safe Rust APIs, both client-side and server-side. +/// +/// `with_api!(MySelf, my_self, my_macro)` expands to: +/// ```rust,ignore (pseudo-code) +/// my_macro! { +/// // ... +/// Literal { +/// // ... +/// fn character(ch: char) -> MySelf::Literal; +/// // ... 
+/// fn span(my_self: &MySelf::Literal) -> MySelf::Span; +/// fn set_span(my_self: &mut MySelf::Literal, span: MySelf::Span); +/// }, +/// // ... +/// } +/// ``` +/// +/// The first two arguments serve to customize the arguments names +/// and argument/return types, to enable several different usecases: +/// +/// If `my_self` is just `self`, then each `fn` signature can be used +/// as-is for a method. If it's anything else (`self_` in practice), +/// then the signatures don't have a special `self` argument, and +/// can, therefore, have a different one introduced. +/// +/// If `MySelf` is just `Self`, then the types are only valid inside +/// a trait or a trait impl, where the trait has associated types +/// for each of the API types. If non-associated types are desired, +/// a module name (`self` in practice) can be used instead of `Self`. +macro_rules! with_api { + ($S:ident, $self:ident, $m:ident) => { + $m! { + TokenStream { + fn drop($self: $S::TokenStream); + fn clone($self: &$S::TokenStream) -> $S::TokenStream; + fn new() -> $S::TokenStream; + fn is_empty($self: &$S::TokenStream) -> bool; + fn from_str(src: &str) -> $S::TokenStream; + fn to_string($self: &$S::TokenStream) -> String; + fn from_token_tree( + tree: TokenTree<$S::Group, $S::Punct, $S::Ident, $S::Literal>, + ) -> $S::TokenStream; + fn into_iter($self: $S::TokenStream) -> $S::TokenStreamIter; + }, + TokenStreamBuilder { + fn drop($self: $S::TokenStreamBuilder); + fn new() -> $S::TokenStreamBuilder; + fn push($self: &mut $S::TokenStreamBuilder, stream: $S::TokenStream); + fn build($self: $S::TokenStreamBuilder) -> $S::TokenStream; + }, + TokenStreamIter { + fn drop($self: $S::TokenStreamIter); + fn clone($self: &$S::TokenStreamIter) -> $S::TokenStreamIter; + fn next( + $self: &mut $S::TokenStreamIter, + ) -> Option>; + }, + Group { + fn drop($self: $S::Group); + fn clone($self: &$S::Group) -> $S::Group; + fn new(delimiter: Delimiter, stream: $S::TokenStream) -> $S::Group; + fn delimiter($self: &$S::Group) -> Delimiter; + fn stream($self: &$S::Group) -> $S::TokenStream; + fn span($self: &$S::Group) -> $S::Span; + fn span_open($self: &$S::Group) -> $S::Span; + fn span_close($self: &$S::Group) -> $S::Span; + fn set_span($self: &mut $S::Group, span: $S::Span); + }, + Punct { + fn new(ch: char, spacing: Spacing) -> $S::Punct; + fn as_char($self: $S::Punct) -> char; + fn spacing($self: $S::Punct) -> Spacing; + fn span($self: $S::Punct) -> $S::Span; + fn with_span($self: $S::Punct, span: $S::Span) -> $S::Punct; + }, + Ident { + fn new(string: &str, span: $S::Span, is_raw: bool) -> $S::Ident; + fn span($self: $S::Ident) -> $S::Span; + fn with_span($self: $S::Ident, span: $S::Span) -> $S::Ident; + }, + Literal { + fn drop($self: $S::Literal); + fn clone($self: &$S::Literal) -> $S::Literal; + // FIXME(eddyb) `Literal` should not expose internal `Debug` impls. 
+ fn debug($self: &$S::Literal) -> String; + fn integer(n: &str) -> $S::Literal; + fn typed_integer(n: &str, kind: &str) -> $S::Literal; + fn float(n: &str) -> $S::Literal; + fn f32(n: &str) -> $S::Literal; + fn f64(n: &str) -> $S::Literal; + fn string(string: &str) -> $S::Literal; + fn character(ch: char) -> $S::Literal; + fn byte_string(bytes: &[u8]) -> $S::Literal; + fn span($self: &$S::Literal) -> $S::Span; + fn set_span($self: &mut $S::Literal, span: $S::Span); + fn subspan( + $self: &$S::Literal, + start: Bound, + end: Bound, + ) -> Option<$S::Span>; + }, + SourceFile { + fn drop($self: $S::SourceFile); + fn clone($self: &$S::SourceFile) -> $S::SourceFile; + fn eq($self: &$S::SourceFile, other: &$S::SourceFile) -> bool; + fn path($self: &$S::SourceFile) -> String; + fn is_real($self: &$S::SourceFile) -> bool; + }, + MultiSpan { + fn drop($self: $S::MultiSpan); + fn new() -> $S::MultiSpan; + fn push($self: &mut $S::MultiSpan, span: $S::Span); + }, + Diagnostic { + fn drop($self: $S::Diagnostic); + fn new(level: Level, msg: &str, span: $S::MultiSpan) -> $S::Diagnostic; + fn sub( + $self: &mut $S::Diagnostic, + level: Level, + msg: &str, + span: $S::MultiSpan, + ); + fn emit($self: $S::Diagnostic); + }, + Span { + fn debug($self: $S::Span) -> String; + fn def_site() -> $S::Span; + fn call_site() -> $S::Span; + fn source_file($self: $S::Span) -> $S::SourceFile; + fn parent($self: $S::Span) -> Option<$S::Span>; + fn source($self: $S::Span) -> $S::Span; + fn start($self: $S::Span) -> LineColumn; + fn end($self: $S::Span) -> LineColumn; + fn join($self: $S::Span, other: $S::Span) -> Option<$S::Span>; + fn resolved_at($self: $S::Span, at: $S::Span) -> $S::Span; + }, + } + }; +} + +// FIXME(eddyb) this calls `encode` for each argument, but in reverse, +// to avoid borrow conflicts from borrows started by `&mut` arguments. +macro_rules! reverse_encode { + ($writer:ident;) => {}; + ($writer:ident; $first:ident $(, $rest:ident)*) => { + reverse_encode!($writer; $($rest),*); + $first.encode(&mut $writer, &mut ()); + } +} + +// FIXME(eddyb) this calls `decode` for each argument, but in reverse, +// to avoid borrow conflicts from borrows started by `&mut` arguments. +macro_rules! reverse_decode { + ($reader:ident, $s:ident;) => {}; + ($reader:ident, $s:ident; $first:ident: $first_ty:ty $(, $rest:ident: $rest_ty:ty)*) => { + reverse_decode!($reader, $s; $($rest: $rest_ty),*); + let $first = <$first_ty>::decode(&mut $reader, $s); + } +} + +#[allow(unsafe_code)] +mod buffer; +#[forbid(unsafe_code)] +pub mod client; +#[allow(unsafe_code)] +mod closure; +#[forbid(unsafe_code)] +mod handle; +#[macro_use] +#[forbid(unsafe_code)] +mod rpc; +#[allow(unsafe_code)] +mod scoped_cell; +#[forbid(unsafe_code)] +pub mod server; + +use self::buffer::Buffer; +pub use self::rpc::PanicMessage; +use self::rpc::{Decode, DecodeMut, Encode, Reader, Writer}; + +/// An active connection between a server and a client. +/// The server creates the bridge (`Bridge::run_server` in `server.rs`), +/// then passes it to the client through the function pointer in the `run` +/// field of `client::Client`. The client holds its copy of the `Bridge` +/// in TLS during its execution (`Bridge::{enter, with}` in `client.rs`). +#[repr(C)] +pub struct Bridge<'a> { + /// Reusable buffer (only `clear`-ed, never shrunk), primarily + /// used for making requests, but also for passing input to client. + cached_buffer: Buffer, + + /// Server-side function that the client uses to make requests. 
+ dispatch: closure::Closure<'a, Buffer, Buffer>, +} + +impl<'a> !Sync for Bridge<'a> {} +impl<'a> !Send for Bridge<'a> {} + +#[forbid(unsafe_code)] +#[allow(non_camel_case_types)] +mod api_tags { + use super::rpc::{DecodeMut, Encode, Reader, Writer}; + + macro_rules! declare_tags { + ($($name:ident { + $(fn $method:ident($($arg:ident: $arg_ty:ty),* $(,)*) $(-> $ret_ty:ty)*;)* + }),* $(,)*) => { + $( + pub(super) enum $name { + $($method),* + } + rpc_encode_decode!(enum $name { $($method),* }); + )* + + + pub(super) enum Method { + $($name($name)),* + } + rpc_encode_decode!(enum Method { $($name(m)),* }); + } + } + with_api!(self, self, declare_tags); +} + +/// Helper to wrap associated types to allow trait impl dispatch. +/// That is, normally a pair of impls for `T::Foo` and `T::Bar` +/// can overlap, but if the impls are, instead, on types like +/// `Marked` and `Marked`, they can't. +trait Mark { + type Unmarked; + fn mark(unmarked: Self::Unmarked) -> Self; +} + +/// Unwrap types wrapped by `Mark::mark` (see `Mark` for details). +trait Unmark { + type Unmarked; + fn unmark(self) -> Self::Unmarked; +} + +#[derive(Copy, Clone, PartialEq, Eq, Hash)] +struct Marked { + value: T, + _marker: marker::PhantomData, +} + +impl Mark for Marked { + type Unmarked = T; + fn mark(unmarked: Self::Unmarked) -> Self { + Marked { + value: unmarked, + _marker: marker::PhantomData, + } + } +} +impl Unmark for Marked { + type Unmarked = T; + fn unmark(self) -> Self::Unmarked { + self.value + } +} +impl Unmark for &'a Marked { + type Unmarked = &'a T; + fn unmark(self) -> Self::Unmarked { + &self.value + } +} +impl Unmark for &'a mut Marked { + type Unmarked = &'a mut T; + fn unmark(self) -> Self::Unmarked { + &mut self.value + } +} + +impl Mark for Option { + type Unmarked = Option; + fn mark(unmarked: Self::Unmarked) -> Self { + unmarked.map(T::mark) + } +} +impl Unmark for Option { + type Unmarked = Option; + fn unmark(self) -> Self::Unmarked { + self.map(T::unmark) + } +} + +macro_rules! mark_noop { + ($($ty:ty),* $(,)*) => { + $( + impl Mark for $ty { + type Unmarked = Self; + fn mark(unmarked: Self::Unmarked) -> Self { + unmarked + } + } + impl Unmark for $ty { + type Unmarked = Self; + fn unmark(self) -> Self::Unmarked { + self + } + } + )* + } +} +mark_noop! 
{ + (), + bool, + char, + &'a [u8], + &'a str, + String, + Delimiter, + Level, + LineColumn, + Spacing, + Bound, +} + +rpc_encode_decode!( + enum Delimiter { + Parenthesis, + Brace, + Bracket, + None, + } +); +rpc_encode_decode!( + enum Level { + Error, + Warning, + Note, + Help, + } +); +rpc_encode_decode!(struct LineColumn { line, column }); +rpc_encode_decode!( + enum Spacing { + Alone, + Joint, + } +); + +#[derive(Clone)] +pub enum TokenTree { + Group(G), + Punct(P), + Ident(I), + Literal(L), +} + +impl Mark for TokenTree { + type Unmarked = TokenTree; + fn mark(unmarked: Self::Unmarked) -> Self { + match unmarked { + TokenTree::Group(tt) => TokenTree::Group(G::mark(tt)), + TokenTree::Punct(tt) => TokenTree::Punct(P::mark(tt)), + TokenTree::Ident(tt) => TokenTree::Ident(I::mark(tt)), + TokenTree::Literal(tt) => TokenTree::Literal(L::mark(tt)), + } + } +} +impl Unmark for TokenTree { + type Unmarked = TokenTree; + fn unmark(self) -> Self::Unmarked { + match self { + TokenTree::Group(tt) => TokenTree::Group(tt.unmark()), + TokenTree::Punct(tt) => TokenTree::Punct(tt.unmark()), + TokenTree::Ident(tt) => TokenTree::Ident(tt.unmark()), + TokenTree::Literal(tt) => TokenTree::Literal(tt.unmark()), + } + } +} + +rpc_encode_decode!( + enum TokenTree { + Group(tt), + Punct(tt), + Ident(tt), + Literal(tt), + } +); diff --git a/src/libproc_macro/bridge/rpc.rs b/src/libproc_macro/bridge/rpc.rs new file mode 100644 index 0000000000..fafc3d0074 --- /dev/null +++ b/src/libproc_macro/bridge/rpc.rs @@ -0,0 +1,319 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Serialization for client<->server communication. + +use std::any::Any; +use std::char; +use std::io::Write; +use std::num::NonZeroU32; +use std::ops::Bound; +use std::str; + +pub(super) type Writer = super::buffer::Buffer; + +pub(super) trait Encode: Sized { + fn encode(self, w: &mut Writer, s: &mut S); +} + +pub(super) type Reader<'a> = &'a [u8]; + +pub(super) trait Decode<'a, 's, S>: Sized { + fn decode(r: &mut Reader<'a>, s: &'s S) -> Self; +} + +pub(super) trait DecodeMut<'a, 's, S>: Sized { + fn decode(r: &mut Reader<'a>, s: &'s mut S) -> Self; +} + +macro_rules! 
rpc_encode_decode { + (uleb128 $ty:ty) => { + impl Encode for $ty { + fn encode(mut self, w: &mut Writer, s: &mut S) { + let mut byte = 0x80; + while byte & 0x80 != 0 { + byte = (self & 0x7f) as u8; + self >>= 7; + if self != 0 { + byte |= 0x80; + } + byte.encode(w, s); + } + } + } + + impl DecodeMut<'_, '_, S> for $ty { + fn decode(r: &mut Reader, s: &mut S) -> Self { + let mut byte = 0x80; + let mut v = 0; + let mut shift = 0; + while byte & 0x80 != 0 { + byte = u8::decode(r, s); + v |= ((byte & 0x7f) as Self) << shift; + shift += 7; + } + v + } + } + }; + (struct $name:ident { $($field:ident),* $(,)* }) => { + impl Encode for $name { + fn encode(self, w: &mut Writer, s: &mut S) { + $(self.$field.encode(w, s);)* + } + } + + impl DecodeMut<'_, '_, S> for $name { + fn decode(r: &mut Reader, s: &mut S) -> Self { + $name { + $($field: DecodeMut::decode(r, s)),* + } + } + } + }; + (enum $name:ident $(<$($T:ident),+>)* { $($variant:ident $(($field:ident))*),* $(,)* }) => { + impl),+)*> Encode for $name $(<$($T),+>)* { + fn encode(self, w: &mut Writer, s: &mut S) { + // HACK(eddyb) `Tag` enum duplicated between the + // two impls as there's no other place to stash it. + #[repr(u8)] enum Tag { $($variant),* } + #[allow(non_upper_case_globals)] + impl Tag { $(const $variant: u8 = Tag::$variant as u8;)* } + + match self { + $($name::$variant $(($field))* => { + ::$variant.encode(w, s); + $($field.encode(w, s);)* + })* + } + } + } + + impl DecodeMut<'a, 's, S>),+)*> DecodeMut<'a, '_, S> + for $name $(<$($T),+>)* + { + fn decode(r: &mut Reader<'a>, s: &mut S) -> Self { + // HACK(eddyb) `Tag` enum duplicated between the + // two impls as there's no other place to stash it. + #[repr(u8)] enum Tag { $($variant),* } + #[allow(non_upper_case_globals)] + impl Tag { $(const $variant: u8 = Tag::$variant as u8;)* } + + match u8::decode(r, s) { + $(::$variant => { + $(let $field = DecodeMut::decode(r, s);)* + $name::$variant $(($field))* + })* + _ => unreachable!(), + } + } + } + } +} + +impl Encode for () { + fn encode(self, _: &mut Writer, _: &mut S) {} +} + +impl DecodeMut<'_, '_, S> for () { + fn decode(_: &mut Reader, _: &mut S) -> Self {} +} + +impl Encode for u8 { + fn encode(self, w: &mut Writer, _: &mut S) { + w.write_all(&[self]).unwrap(); + } +} + +impl DecodeMut<'_, '_, S> for u8 { + fn decode(r: &mut Reader, _: &mut S) -> Self { + let x = r[0]; + *r = &r[1..]; + x + } +} + +rpc_encode_decode!(uleb128 u32); +rpc_encode_decode!(uleb128 usize); + +impl Encode for bool { + fn encode(self, w: &mut Writer, s: &mut S) { + (self as u8).encode(w, s); + } +} + +impl DecodeMut<'_, '_, S> for bool { + fn decode(r: &mut Reader, s: &mut S) -> Self { + match u8::decode(r, s) { + 0 => false, + 1 => true, + _ => unreachable!(), + } + } +} + +impl Encode for char { + fn encode(self, w: &mut Writer, s: &mut S) { + (self as u32).encode(w, s); + } +} + +impl DecodeMut<'_, '_, S> for char { + fn decode(r: &mut Reader, s: &mut S) -> Self { + char::from_u32(u32::decode(r, s)).unwrap() + } +} + +impl Encode for NonZeroU32 { + fn encode(self, w: &mut Writer, s: &mut S) { + self.get().encode(w, s); + } +} + +impl DecodeMut<'_, '_, S> for NonZeroU32 { + fn decode(r: &mut Reader, s: &mut S) -> Self { + Self::new(u32::decode(r, s)).unwrap() + } +} + +impl, B: Encode> Encode for (A, B) { + fn encode(self, w: &mut Writer, s: &mut S) { + self.0.encode(w, s); + self.1.encode(w, s); + } +} + +impl DecodeMut<'a, 's, S>, B: for<'s> DecodeMut<'a, 's, S>> DecodeMut<'a, '_, S> + for (A, B) +{ + fn decode(r: &mut Reader<'a>, s: &mut S) 
-> Self { + (DecodeMut::decode(r, s), DecodeMut::decode(r, s)) + } +} + +rpc_encode_decode!( + enum Bound { + Included(x), + Excluded(x), + Unbounded, + } +); + +rpc_encode_decode!( + enum Option { + None, + Some(x), + } +); + +rpc_encode_decode!( + enum Result { + Ok(x), + Err(e), + } +); + +impl Encode for &[u8] { + fn encode(self, w: &mut Writer, s: &mut S) { + self.len().encode(w, s); + w.write_all(self).unwrap(); + } +} + +impl DecodeMut<'a, '_, S> for &'a [u8] { + fn decode(r: &mut Reader<'a>, s: &mut S) -> Self { + let len = usize::decode(r, s); + let xs = &r[..len]; + *r = &r[len..]; + xs + } +} + +impl Encode for &str { + fn encode(self, w: &mut Writer, s: &mut S) { + self.as_bytes().encode(w, s); + } +} + +impl DecodeMut<'a, '_, S> for &'a str { + fn decode(r: &mut Reader<'a>, s: &mut S) -> Self { + str::from_utf8(<&[u8]>::decode(r, s)).unwrap() + } +} + +impl Encode for String { + fn encode(self, w: &mut Writer, s: &mut S) { + self[..].encode(w, s); + } +} + +impl DecodeMut<'_, '_, S> for String { + fn decode(r: &mut Reader, s: &mut S) -> Self { + <&str>::decode(r, s).to_string() + } +} + +/// Simplied version of panic payloads, ignoring +/// types other than `&'static str` and `String`. +pub enum PanicMessage { + StaticStr(&'static str), + String(String), + Unknown, +} + +impl From> for PanicMessage { + fn from(payload: Box) -> Self { + if let Some(s) = payload.downcast_ref::<&'static str>() { + return PanicMessage::StaticStr(s); + } + if let Ok(s) = payload.downcast::() { + return PanicMessage::String(*s); + } + PanicMessage::Unknown + } +} + +impl Into> for PanicMessage { + fn into(self) -> Box { + match self { + PanicMessage::StaticStr(s) => Box::new(s), + PanicMessage::String(s) => Box::new(s), + PanicMessage::Unknown => { + struct UnknownPanicMessage; + Box::new(UnknownPanicMessage) + } + } + } +} + +impl PanicMessage { + pub fn as_str(&self) -> Option<&str> { + match self { + PanicMessage::StaticStr(s) => Some(s), + PanicMessage::String(s) => Some(s), + PanicMessage::Unknown => None, + } + } +} + +impl Encode for PanicMessage { + fn encode(self, w: &mut Writer, s: &mut S) { + self.as_str().encode(w, s); + } +} + +impl DecodeMut<'_, '_, S> for PanicMessage { + fn decode(r: &mut Reader, s: &mut S) -> Self { + match Option::::decode(r, s) { + Some(s) => PanicMessage::String(s), + None => PanicMessage::Unknown, + } + } +} diff --git a/src/libproc_macro/bridge/scoped_cell.rs b/src/libproc_macro/bridge/scoped_cell.rs new file mode 100644 index 0000000000..51d1fece79 --- /dev/null +++ b/src/libproc_macro/bridge/scoped_cell.rs @@ -0,0 +1,90 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! `Cell` variant for (scoped) existential lifetimes. + +use std::cell::Cell; +use std::mem; +use std::ops::{Deref, DerefMut}; + +/// Type lambda application, with a lifetime. +pub trait ApplyL<'a> { + type Out; +} + +/// Type lambda taking a lifetime, i.e. `Lifetime -> Type`. 
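[Editor's note] The `uleb128` arms of `rpc_encode_decode!` above implement a LEB128-style variable-length integer encoding: 7 payload bits per byte, with the high bit meaning "more bytes follow". A standalone sketch of the same scheme, with hypothetical helper names rather than the macro-generated impls:

```rust
// Encode `v` as a little-endian base-128 varint into `out`.
fn encode_uleb128(mut v: u32, out: &mut Vec<u8>) {
    loop {
        let byte = (v & 0x7f) as u8;
        v >>= 7;
        if v != 0 {
            out.push(byte | 0x80); // continuation bit set
        } else {
            out.push(byte);
            break;
        }
    }
}

// Decode one varint, returning the value and the unread remainder.
fn decode_uleb128(bytes: &[u8]) -> (u32, &[u8]) {
    let mut v = 0u32;
    let mut shift = 0;
    let mut i = 0;
    loop {
        let byte = bytes[i];
        i += 1;
        v |= u32::from(byte & 0x7f) << shift;
        shift += 7;
        if byte & 0x80 == 0 {
            return (v, &bytes[i..]);
        }
    }
}

fn main() {
    let mut buf = Vec::new();
    encode_uleb128(300, &mut buf);
    encode_uleb128(7, &mut buf);
    assert_eq!(buf, [0xac, 0x02, 0x07]);
    let (a, rest) = decode_uleb128(&buf);
    let (b, rest) = decode_uleb128(rest);
    assert_eq!((a, b), (300, 7));
    assert!(rest.is_empty());
}
```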
+pub trait LambdaL: for<'a> ApplyL<'a> {} + +impl ApplyL<'a>> LambdaL for T {} + +// HACK(eddyb) work around projection limitations with a newtype +// FIXME(#52812) replace with `&'a mut >::Out` +pub struct RefMutL<'a, 'b, T: LambdaL>(&'a mut >::Out); + +impl<'a, 'b, T: LambdaL> Deref for RefMutL<'a, 'b, T> { + type Target = >::Out; + fn deref(&self) -> &Self::Target { + self.0 + } +} + +impl<'a, 'b, T: LambdaL> DerefMut for RefMutL<'a, 'b, T> { + fn deref_mut(&mut self) -> &mut Self::Target { + self.0 + } +} + +pub struct ScopedCell(Cell<>::Out>); + +impl ScopedCell { + pub const fn new(value: >::Out) -> Self { + ScopedCell(Cell::new(value)) + } + + /// Set the value in `self` to `replacement` while + /// running `f`, which gets the old value, mutably. + /// The old value will be restored after `f` exits, even + /// by panic, including modifications made to it by `f`. + pub fn replace<'a, R>( + &self, + replacement: >::Out, + f: impl for<'b, 'c> FnOnce(RefMutL<'b, 'c, T>) -> R, + ) -> R { + /// Wrapper that ensures that the cell always gets filled + /// (with the original state, optionally changed by `f`), + /// even if `f` had panicked. + struct PutBackOnDrop<'a, T: LambdaL> { + cell: &'a ScopedCell, + value: Option<>::Out>, + } + + impl<'a, T: LambdaL> Drop for PutBackOnDrop<'a, T> { + fn drop(&mut self) { + self.cell.0.set(self.value.take().unwrap()); + } + } + + let mut put_back_on_drop = PutBackOnDrop { + cell: self, + value: Some(self.0.replace(unsafe { + let erased = mem::transmute_copy(&replacement); + mem::forget(replacement); + erased + })), + }; + + f(RefMutL(put_back_on_drop.value.as_mut().unwrap())) + } + + /// Set the value in `self` to `value` while running `f`. + pub fn set<'a, R>(&self, value: >::Out, f: impl FnOnce() -> R) -> R { + self.replace(value, |_| f()) + } +} diff --git a/src/libproc_macro/bridge/server.rs b/src/libproc_macro/bridge/server.rs new file mode 100644 index 0000000000..f500b17d1c --- /dev/null +++ b/src/libproc_macro/bridge/server.rs @@ -0,0 +1,352 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Server-side traits. + +use super::*; + +// FIXME(eddyb) generate the definition of `HandleStore` in `server.rs`. +use super::client::HandleStore; + +/// Declare an associated item of one of the traits below, optionally +/// adjusting it (i.e. adding bounds to types and default bodies to methods). +macro_rules! 
associated_item { + (type TokenStream) => + (type TokenStream: 'static + Clone;); + (type TokenStreamBuilder) => + (type TokenStreamBuilder: 'static;); + (type TokenStreamIter) => + (type TokenStreamIter: 'static + Clone;); + (type Group) => + (type Group: 'static + Clone;); + (type Punct) => + (type Punct: 'static + Copy + Eq + Hash;); + (type Ident) => + (type Ident: 'static + Copy + Eq + Hash;); + (type Literal) => + (type Literal: 'static + Clone;); + (type SourceFile) => + (type SourceFile: 'static + Clone;); + (type MultiSpan) => + (type MultiSpan: 'static;); + (type Diagnostic) => + (type Diagnostic: 'static;); + (type Span) => + (type Span: 'static + Copy + Eq + Hash;); + (fn drop(&mut self, $arg:ident: $arg_ty:ty)) => + (fn drop(&mut self, $arg: $arg_ty) { mem::drop($arg) }); + (fn clone(&mut self, $arg:ident: $arg_ty:ty) -> $ret_ty:ty) => + (fn clone(&mut self, $arg: $arg_ty) -> $ret_ty { $arg.clone() }); + ($($item:tt)*) => ($($item)*;) +} + +macro_rules! declare_server_traits { + ($($name:ident { + $(fn $method:ident($($arg:ident: $arg_ty:ty),* $(,)*) $(-> $ret_ty:ty)*;)* + }),* $(,)*) => { + pub trait Types { + $(associated_item!(type $name);)* + } + + $(pub trait $name: Types { + $(associated_item!(fn $method(&mut self, $($arg: $arg_ty),*) $(-> $ret_ty)*);)* + })* + + pub trait Server: Types $(+ $name)* {} + impl Server for S {} + } +} +with_api!(Self, self_, declare_server_traits); + +pub(super) struct MarkedTypes(S); + +macro_rules! define_mark_types_impls { + ($($name:ident { + $(fn $method:ident($($arg:ident: $arg_ty:ty),* $(,)*) $(-> $ret_ty:ty)*;)* + }),* $(,)*) => { + impl Types for MarkedTypes { + $(type $name = Marked;)* + } + + $(impl $name for MarkedTypes { + $(fn $method(&mut self, $($arg: $arg_ty),*) $(-> $ret_ty)* { + <_>::mark($name::$method(&mut self.0, $($arg.unmark()),*)) + })* + })* + } +} +with_api!(Self, self_, define_mark_types_impls); + +struct Dispatcher { + handle_store: HandleStore, + server: S, +} + +macro_rules! define_dispatcher_impl { + ($($name:ident { + $(fn $method:ident($($arg:ident: $arg_ty:ty),* $(,)*) $(-> $ret_ty:ty)*;)* + }),* $(,)*) => { + // FIXME(eddyb) `pub` only for `ExecutionStrategy` below. + pub trait DispatcherTrait { + // HACK(eddyb) these are here to allow `Self::$name` to work below. + $(type $name;)* + fn dispatch(&mut self, b: Buffer) -> Buffer; + } + + impl DispatcherTrait for Dispatcher> { + $(type $name = as Types>::$name;)* + fn dispatch(&mut self, mut b: Buffer) -> Buffer { + let Dispatcher { handle_store, server } = self; + + let mut reader = &b[..]; + match api_tags::Method::decode(&mut reader, &mut ()) { + $(api_tags::Method::$name(m) => match m { + $(api_tags::$name::$method => { + let mut call_method = || { + reverse_decode!(reader, handle_store; $($arg: $arg_ty),*); + $name::$method(server, $($arg),*) + }; + // HACK(eddyb) don't use `panic::catch_unwind` in a panic. + // If client and server happen to use the same `libstd`, + // `catch_unwind` asserts that the panic counter was 0, + // even when the closure passed to it didn't panic. 
+ let r = if thread::panicking() { + Ok(call_method()) + } else { + panic::catch_unwind(panic::AssertUnwindSafe(call_method)) + .map_err(PanicMessage::from) + }; + + b.clear(); + r.encode(&mut b, handle_store); + })* + }),* + } + b + } + } + } +} +with_api!(Self, self_, define_dispatcher_impl); + +pub trait ExecutionStrategy { + fn run_bridge_and_client( + &self, + dispatcher: &mut impl DispatcherTrait, + input: Buffer, + run_client: extern "C" fn(Bridge, D) -> Buffer, + client_data: D, + ) -> Buffer; +} + +pub struct SameThread; + +impl ExecutionStrategy for SameThread { + fn run_bridge_and_client( + &self, + dispatcher: &mut impl DispatcherTrait, + input: Buffer, + run_client: extern "C" fn(Bridge, D) -> Buffer, + client_data: D, + ) -> Buffer { + let mut dispatch = |b| dispatcher.dispatch(b); + + run_client( + Bridge { + cached_buffer: input, + dispatch: (&mut dispatch).into(), + }, + client_data, + ) + } +} + +// NOTE(eddyb) Two implementations are provided, the second one is a bit +// faster but neither is anywhere near as fast as same-thread execution. + +pub struct CrossThread1; + +impl ExecutionStrategy for CrossThread1 { + fn run_bridge_and_client( + &self, + dispatcher: &mut impl DispatcherTrait, + input: Buffer, + run_client: extern "C" fn(Bridge, D) -> Buffer, + client_data: D, + ) -> Buffer { + use std::sync::mpsc::channel; + + let (req_tx, req_rx) = channel(); + let (res_tx, res_rx) = channel(); + + let join_handle = thread::spawn(move || { + let mut dispatch = |b| { + req_tx.send(b).unwrap(); + res_rx.recv().unwrap() + }; + + run_client( + Bridge { + cached_buffer: input, + dispatch: (&mut dispatch).into(), + }, + client_data, + ) + }); + + for b in req_rx { + res_tx.send(dispatcher.dispatch(b)).unwrap(); + } + + join_handle.join().unwrap() + } +} + +pub struct CrossThread2; + +impl ExecutionStrategy for CrossThread2 { + fn run_bridge_and_client( + &self, + dispatcher: &mut impl DispatcherTrait, + input: Buffer, + run_client: extern "C" fn(Bridge, D) -> Buffer, + client_data: D, + ) -> Buffer { + use std::sync::{Arc, Mutex}; + + enum State { + Req(T), + Res(T), + } + + let mut state = Arc::new(Mutex::new(State::Res(Buffer::new()))); + + let server_thread = thread::current(); + let state2 = state.clone(); + let join_handle = thread::spawn(move || { + let mut dispatch = |b| { + *state2.lock().unwrap() = State::Req(b); + server_thread.unpark(); + loop { + thread::park(); + if let State::Res(b) = &mut *state2.lock().unwrap() { + break b.take(); + } + } + }; + + let r = run_client( + Bridge { + cached_buffer: input, + dispatch: (&mut dispatch).into(), + }, + client_data, + ); + + // Wake up the server so it can exit the dispatch loop. + drop(state2); + server_thread.unpark(); + + r + }); + + // Check whether `state2` was dropped, to know when to stop. 
+ while Arc::get_mut(&mut state).is_none() { + thread::park(); + let mut b = match &mut *state.lock().unwrap() { + State::Req(b) => b.take(), + _ => continue, + }; + b = dispatcher.dispatch(b.take()); + *state.lock().unwrap() = State::Res(b); + join_handle.thread().unpark(); + } + + join_handle.join().unwrap() + } +} + +fn run_server< + S: Server, + I: Encode>>, + O: for<'a, 's> DecodeMut<'a, 's, HandleStore>>, + D: Copy + Send + 'static, +>( + strategy: &impl ExecutionStrategy, + handle_counters: &'static client::HandleCounters, + server: S, + input: I, + run_client: extern "C" fn(Bridge, D) -> Buffer, + client_data: D, +) -> Result { + let mut dispatcher = Dispatcher { + handle_store: HandleStore::new(handle_counters), + server: MarkedTypes(server), + }; + + let mut b = Buffer::new(); + input.encode(&mut b, &mut dispatcher.handle_store); + + b = strategy.run_bridge_and_client(&mut dispatcher, b, run_client, client_data); + + Result::decode(&mut &b[..], &mut dispatcher.handle_store) +} + +impl client::Client ::TokenStream> { + pub fn run( + &self, + strategy: &impl ExecutionStrategy, + server: S, + input: S::TokenStream, + ) -> Result { + let client::Client { + get_handle_counters, + run, + f, + } = *self; + run_server( + strategy, + get_handle_counters(), + server, + as Types>::TokenStream::mark(input), + run, + f, + ) + .map( as Types>::TokenStream::unmark) + } +} + +impl client::Client ::TokenStream> { + pub fn run( + &self, + strategy: &impl ExecutionStrategy, + server: S, + input: S::TokenStream, + input2: S::TokenStream, + ) -> Result { + let client::Client { + get_handle_counters, + run, + f, + } = *self; + run_server( + strategy, + get_handle_counters(), + server, + ( + as Types>::TokenStream::mark(input), + as Types>::TokenStream::mark(input2), + ), + run, + f, + ) + .map( as Types>::TokenStream::unmark) + } +} diff --git a/src/libproc_macro/diagnostic.rs b/src/libproc_macro/diagnostic.rs index bf23de3943..4234f0bcd2 100644 --- a/src/libproc_macro/diagnostic.rs +++ b/src/libproc_macro/diagnostic.rs @@ -10,8 +10,6 @@ use Span; -use rustc_errors as errors; - /// An enum representing a diagnostic level. #[unstable(feature = "proc_macro_diagnostic", issue = "54140")] #[derive(Copy, Clone, Debug)] @@ -180,22 +178,22 @@ impl Diagnostic { /// Emit the diagnostic. 
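[Editor's note] The `ExecutionStrategy` implementations above differ only in how request buffers travel between the client and the dispatcher. A hedged, self-contained sketch of the `CrossThread1`-style loop, with a toy upper-casing "dispatcher" standing in for the real `Dispatcher::dispatch`:

```rust
use std::sync::mpsc::channel;
use std::thread;

fn main() {
    let (req_tx, req_rx) = channel::<Vec<u8>>();
    let (res_tx, res_rx) = channel::<Vec<u8>>();

    // "Client" thread: sends each request buffer and blocks on the response,
    // mirroring how `run_client` drives the `dispatch` closure.
    let client = thread::spawn(move || {
        let dispatch = |req: Vec<u8>| {
            req_tx.send(req).unwrap();
            res_rx.recv().unwrap()
        };
        let first = dispatch(b"ping".to_vec());
        let second = dispatch(first.clone());
        // `req_tx` is dropped when this closure returns, ending the server loop.
        (first, second)
    });

    // "Server" loop: dispatch every incoming request until the client hangs up.
    for req in req_rx {
        let res: Vec<u8> = req.iter().map(|byte| byte.to_ascii_uppercase()).collect();
        res_tx.send(res).unwrap();
    }

    let (first, second) = client.join().unwrap();
    assert_eq!(first, b"PING".to_vec());
    assert_eq!(second, b"PING".to_vec());
}
```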
#[unstable(feature = "proc_macro_diagnostic", issue = "54140")] pub fn emit(self) { - fn to_internal(spans: Vec) -> ::syntax_pos::MultiSpan { - let spans: Vec<_> = spans.into_iter().map(|s| s.0).collect(); - ::syntax_pos::MultiSpan::from_spans(spans) + fn to_internal(spans: Vec) -> ::bridge::client::MultiSpan { + let mut multi_span = ::bridge::client::MultiSpan::new(); + for span in spans { + multi_span.push(span.0); + } + multi_span } - let level = self.level.to_internal(); - let mut diag = errors::Diagnostic::new(level, &*self.message); - diag.set_span(to_internal(self.spans)); - - for child in self.children { - let level = child.level.to_internal(); - diag.sub(level, &*child.message, to_internal(child.spans), None); + let mut diag = ::bridge::client::Diagnostic::new( + self.level, + &self.message[..], + to_internal(self.spans), + ); + for c in self.children { + diag.sub(c.level, &c.message[..], to_internal(c.spans)); } - - ::__internal::with_sess(move |sess, _| { - errors::DiagnosticBuilder::new_diagnostic(&sess.span_diagnostic, diag).emit(); - }); + diag.emit(); } } diff --git a/src/libproc_macro/lib.rs b/src/libproc_macro/lib.rs index 1a2b16a4fe..32c8130293 100644 --- a/src/libproc_macro/lib.rs +++ b/src/libproc_macro/lib.rs @@ -28,39 +28,30 @@ test(attr(allow(dead_code, deprecated, unused_variables, unused_mut))))] #![feature(nll)] -#![feature(rustc_private)] #![feature(staged_api)] -#![feature(lang_items)] +#![feature(const_fn)] +#![feature(extern_types)] +#![feature(in_band_lifetimes)] #![feature(optin_builtin_traits)] #![feature(non_exhaustive)] +#![feature(specialization)] #![recursion_limit="256"] -extern crate syntax; -extern crate syntax_pos; -extern crate rustc_errors; -extern crate rustc_data_structures; - #[unstable(feature = "proc_macro_internals", issue = "27812")] #[doc(hidden)] -pub mod rustc; +pub mod bridge; mod diagnostic; #[unstable(feature = "proc_macro_diagnostic", issue = "54140")] pub use diagnostic::{Diagnostic, Level, MultiSpan}; -use std::{ascii, fmt, iter}; +use std::{fmt, iter, mem}; +use std::ops::{Bound, RangeBounds}; use std::path::PathBuf; -use rustc_data_structures::sync::Lrc; use std::str::FromStr; -use syntax::errors::DiagnosticBuilder; -use syntax::parse::{self, token}; -use syntax::symbol::Symbol; -use syntax::tokenstream::{self, DelimSpan}; -use syntax_pos::{Pos, FileName}; - /// The main type provided by this crate, representing an abstract stream of /// tokens, or, more specifically, a sequence of token trees. /// The type provide interfaces for iterating over those token trees and, conversely, @@ -70,7 +61,7 @@ use syntax_pos::{Pos, FileName}; /// and `#[proc_macro_derive]` definitions. #[stable(feature = "proc_macro_lib", since = "1.15.0")] #[derive(Clone)] -pub struct TokenStream(tokenstream::TokenStream); +pub struct TokenStream(bridge::client::TokenStream); #[stable(feature = "proc_macro_lib", since = "1.15.0")] impl !Send for TokenStream {} @@ -93,7 +84,7 @@ impl TokenStream { /// Returns an empty `TokenStream` containing no token trees. #[stable(feature = "proc_macro_lib2", since = "1.29.0")] pub fn new() -> TokenStream { - TokenStream(tokenstream::TokenStream::empty()) + TokenStream(bridge::client::TokenStream::new()) } /// Checks if this `TokenStream` is empty. 
@@ -115,11 +106,16 @@ impl FromStr for TokenStream { type Err = LexError; fn from_str(src: &str) -> Result { - __internal::with_sess(|sess, data| { - Ok(__internal::token_stream_wrap(parse::parse_stream_from_source_str( - FileName::ProcMacroSourceCode, src.to_string(), sess, Some(data.call_site.0) - ))) - }) + Ok(TokenStream(bridge::client::TokenStream::from_str(src))) + } +} + +// NB: the bridge only provides `to_string`, implement `fmt::Display` +// based on it (the reverse of the usual relationship between the two). +#[stable(feature = "proc_macro_lib", since = "1.15.0")] +impl ToString for TokenStream { + fn to_string(&self) -> String { + self.0.to_string() } } @@ -129,7 +125,7 @@ impl FromStr for TokenStream { #[stable(feature = "proc_macro_lib", since = "1.15.0")] impl fmt::Display for TokenStream { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.0.fmt(f) + f.write_str(&self.to_string()) } } @@ -149,7 +145,12 @@ pub use quote::{quote, quote_span}; #[stable(feature = "proc_macro_lib2", since = "1.29.0")] impl From for TokenStream { fn from(tree: TokenTree) -> TokenStream { - TokenStream(tree.to_internal()) + TokenStream(bridge::client::TokenStream::from_token_tree(match tree { + TokenTree::Group(tt) => bridge::TokenTree::Group(tt.0), + TokenTree::Punct(tt) => bridge::TokenTree::Punct(tt.0), + TokenTree::Ident(tt) => bridge::TokenTree::Ident(tt.0), + TokenTree::Literal(tt) => bridge::TokenTree::Literal(tt.0) + })) } } @@ -166,7 +167,7 @@ impl iter::FromIterator for TokenStream { #[stable(feature = "proc_macro_lib", since = "1.15.0")] impl iter::FromIterator for TokenStream { fn from_iter>(streams: I) -> Self { - let mut builder = tokenstream::TokenStreamBuilder::new(); + let mut builder = bridge::client::TokenStreamBuilder::new(); for stream in streams { builder.push(stream.0); } @@ -184,52 +185,34 @@ impl Extend for TokenStream { #[stable(feature = "token_stream_extend", since = "1.30.0")] impl Extend for TokenStream { fn extend>(&mut self, streams: I) { - self.0.extend(streams.into_iter().map(|stream| stream.0)); + // FIXME(eddyb) Use an optimized implementation if/when possible. + *self = iter::once(mem::replace(self, Self::new())).chain(streams).collect(); } } /// Public implementation details for the `TokenStream` type, such as iterators. #[stable(feature = "proc_macro_lib2", since = "1.29.0")] pub mod token_stream { - use syntax::tokenstream; - use {TokenTree, TokenStream, Delimiter}; + use {bridge, Group, Ident, Literal, Punct, TokenTree, TokenStream}; /// An iterator over `TokenStream`'s `TokenTree`s. /// The iteration is "shallow", e.g. the iterator doesn't recurse into delimited groups, /// and returns whole groups as token trees. #[derive(Clone)] #[stable(feature = "proc_macro_lib2", since = "1.29.0")] - pub struct IntoIter { - cursor: tokenstream::Cursor, - stack: Vec, - } + pub struct IntoIter(bridge::client::TokenStreamIter); #[stable(feature = "proc_macro_lib2", since = "1.29.0")] impl Iterator for IntoIter { type Item = TokenTree; fn next(&mut self) -> Option { - loop { - let tree = self.stack.pop().or_else(|| { - let next = self.cursor.next_as_stream()?; - Some(TokenTree::from_internal(next, &mut self.stack)) - })?; - // HACK: The condition "dummy span + group with empty delimiter" represents an AST - // fragment approximately converted into a token stream. This may happen, for - // example, with inputs to proc macro attributes, including derives. Such "groups" - // need to flattened during iteration over stream's token trees. 
- // Eventually this needs to be removed in favor of keeping original token trees - // and not doing the roundtrip through AST. - if tree.span().0.is_dummy() { - if let TokenTree::Group(ref group) = tree { - if group.delimiter() == Delimiter::None { - self.cursor.insert(group.stream.clone().0); - continue - } - } - } - return Some(tree); - } + self.0.next().map(|tree| match tree { + bridge::TokenTree::Group(tt) => TokenTree::Group(Group(tt)), + bridge::TokenTree::Punct(tt) => TokenTree::Punct(Punct(tt)), + bridge::TokenTree::Ident(tt) => TokenTree::Ident(Ident(tt)), + bridge::TokenTree::Literal(tt) => TokenTree::Literal(Literal(tt)), + }) } } @@ -239,7 +222,7 @@ pub mod token_stream { type IntoIter = IntoIter; fn into_iter(self) -> IntoIter { - IntoIter { cursor: self.0.trees(), stack: Vec::new() } + IntoIter(self.0.into_iter()) } } } @@ -263,7 +246,7 @@ mod quote; /// A region of source code, along with macro expansion information. #[stable(feature = "proc_macro_lib2", since = "1.29.0")] #[derive(Copy, Clone)] -pub struct Span(syntax_pos::Span); +pub struct Span(bridge::client::Span); #[stable(feature = "proc_macro_lib2", since = "1.29.0")] impl !Send for Span {} @@ -285,7 +268,7 @@ impl Span { /// A span that resolves at the macro definition site. #[unstable(feature = "proc_macro_def_site", issue = "54724")] pub fn def_site() -> Span { - ::__internal::with_sess(|_, data| data.def_site) + Span(bridge::client::Span::def_site()) } /// The span of the invocation of the current procedural macro. @@ -294,15 +277,13 @@ impl Span { /// at the macro call site will be able to refer to them as well. #[stable(feature = "proc_macro_lib2", since = "1.29.0")] pub fn call_site() -> Span { - ::__internal::with_sess(|_, data| data.call_site) + Span(bridge::client::Span::call_site()) } /// The original source file into which this span points. #[unstable(feature = "proc_macro_span", issue = "54725")] pub fn source_file(&self) -> SourceFile { - SourceFile { - source_file: __internal::lookup_char_pos(self.0.lo()).file, - } + SourceFile(self.0.source_file()) } /// The `Span` for the tokens in the previous macro expansion from which @@ -317,27 +298,19 @@ impl Span { /// value is the same as `*self`. #[unstable(feature = "proc_macro_span", issue = "54725")] pub fn source(&self) -> Span { - Span(self.0.source_callsite()) + Span(self.0.source()) } /// Get the starting line/column in the source file for this span. #[unstable(feature = "proc_macro_span", issue = "54725")] pub fn start(&self) -> LineColumn { - let loc = __internal::lookup_char_pos(self.0.lo()); - LineColumn { - line: loc.line, - column: loc.col.to_usize() - } + self.0.start() } /// Get the ending line/column in the source file for this span. #[unstable(feature = "proc_macro_span", issue = "54725")] pub fn end(&self) -> LineColumn { - let loc = __internal::lookup_char_pos(self.0.hi()); - LineColumn { - line: loc.line, - column: loc.col.to_usize() - } + self.0.end() } /// Create a new span encompassing `self` and `other`. @@ -345,19 +318,14 @@ impl Span { /// Returns `None` if `self` and `other` are from different files. 
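[Editor's note] As the `token_stream::IntoIter` docs above note, iteration is shallow: groups come out as single trees. Inside a proc-macro crate, descending into them has to be done by hand. A hypothetical helper (not part of this crate) showing the recursion:

```rust
extern crate proc_macro;

use proc_macro::{TokenStream, TokenTree};

// Counts every token in `stream`, recursing into `Group::stream()` explicitly,
// since the iterator itself yields whole groups as single token trees.
fn count_tokens(stream: TokenStream) -> usize {
    stream
        .into_iter()
        .map(|tree| match tree {
            TokenTree::Group(group) => 1 + count_tokens(group.stream()),
            _ => 1,
        })
        .sum()
}
```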
#[unstable(feature = "proc_macro_span", issue = "54725")] pub fn join(&self, other: Span) -> Option { - let self_loc = __internal::lookup_char_pos(self.0.lo()); - let other_loc = __internal::lookup_char_pos(other.0.lo()); - - if self_loc.file.name != other_loc.file.name { return None } - - Some(Span(self.0.to(other.0))) + self.0.join(other.0).map(Span) } /// Creates a new span with the same line/column information as `self` but /// that resolves symbols as though it were at `other`. #[unstable(feature = "proc_macro_span", issue = "54725")] pub fn resolved_at(&self, other: Span) -> Span { - Span(self.0.with_ctxt(other.0.ctxt())) + Span(self.0.resolved_at(other.0)) } /// Creates a new span with the same name resolution behavior as `self` but @@ -383,10 +351,7 @@ impl Span { #[stable(feature = "proc_macro_lib2", since = "1.29.0")] impl fmt::Debug for Span { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{:?} bytes({}..{})", - self.0.ctxt(), - self.0.lo().0, - self.0.hi().0) + self.0.fmt(f) } } @@ -411,14 +376,7 @@ impl !Sync for LineColumn {} /// The source file of a given `Span`. #[unstable(feature = "proc_macro_span", issue = "54725")] #[derive(Clone)] -pub struct SourceFile { - source_file: Lrc, -} - -#[unstable(feature = "proc_macro_span", issue = "54725")] -impl !Send for SourceFile {} -#[unstable(feature = "proc_macro_span", issue = "54725")] -impl !Sync for SourceFile {} +pub struct SourceFile(bridge::client::SourceFile); impl SourceFile { /// Get the path to this source file. @@ -433,10 +391,7 @@ impl SourceFile { /// [`is_real`]: #method.is_real #[unstable(feature = "proc_macro_span", issue = "54725")] pub fn path(&self) -> PathBuf { - match self.source_file.name { - FileName::Real(ref path) => path.clone(), - _ => PathBuf::from(self.source_file.name.to_string()) - } + PathBuf::from(self.0.path()) } /// Returns `true` if this source file is a real source file, and not generated by an external @@ -446,7 +401,7 @@ impl SourceFile { // This is a hack until intercrate spans are implemented and we can have real source files // for spans generated in external macros. // https://github.com/rust-lang/rust/pull/43604#issuecomment-333334368 - self.source_file.is_real_file() + self.0.is_real() } } @@ -464,7 +419,7 @@ impl fmt::Debug for SourceFile { #[unstable(feature = "proc_macro_span", issue = "54725")] impl PartialEq for SourceFile { fn eq(&self, other: &Self) -> bool { - Lrc::ptr_eq(&self.source_file, &other.source_file) + self.0.eq(&other.0) } } @@ -535,7 +490,7 @@ impl TokenTree { } } -/// Prints token treee in a form convenient for debugging. +/// Prints token tree in a form convenient for debugging. #[stable(feature = "proc_macro_lib2", since = "1.29.0")] impl fmt::Debug for TokenTree { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { @@ -578,18 +533,27 @@ impl From for TokenTree { } } +// NB: the bridge only provides `to_string`, implement `fmt::Display` +// based on it (the reverse of the usual relationship between the two). 
+#[stable(feature = "proc_macro_lib", since = "1.15.0")] +impl ToString for TokenTree { + fn to_string(&self) -> String { + match *self { + TokenTree::Group(ref t) => t.to_string(), + TokenTree::Ident(ref t) => t.to_string(), + TokenTree::Punct(ref t) => t.to_string(), + TokenTree::Literal(ref t) => t.to_string(), + } + } +} + /// Prints the token tree as a string that is supposed to be losslessly convertible back /// into the same token tree (modulo spans), except for possibly `TokenTree::Group`s /// with `Delimiter::None` delimiters and negative numeric literals. #[stable(feature = "proc_macro_lib2", since = "1.29.0")] impl fmt::Display for TokenTree { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - TokenTree::Group(ref t) => t.fmt(f), - TokenTree::Ident(ref t) => t.fmt(f), - TokenTree::Punct(ref t) => t.fmt(f), - TokenTree::Literal(ref t) => t.fmt(f), - } + f.write_str(&self.to_string()) } } @@ -598,11 +562,7 @@ impl fmt::Display for TokenTree { /// A `Group` internally contains a `TokenStream` which is surrounded by `Delimiter`s. #[derive(Clone)] #[stable(feature = "proc_macro_lib2", since = "1.29.0")] -pub struct Group { - delimiter: Delimiter, - stream: TokenStream, - span: DelimSpan, -} +pub struct Group(bridge::client::Group); #[stable(feature = "proc_macro_lib2", since = "1.29.0")] impl !Send for Group {} @@ -639,17 +599,13 @@ impl Group { /// method below. #[stable(feature = "proc_macro_lib2", since = "1.29.0")] pub fn new(delimiter: Delimiter, stream: TokenStream) -> Group { - Group { - delimiter: delimiter, - stream: stream, - span: DelimSpan::from_single(Span::call_site().0), - } + Group(bridge::client::Group::new(delimiter, stream.0)) } /// Returns the delimiter of this `Group` #[stable(feature = "proc_macro_lib2", since = "1.29.0")] pub fn delimiter(&self) -> Delimiter { - self.delimiter + self.0.delimiter() } /// Returns the `TokenStream` of tokens that are delimited in this `Group`. @@ -658,7 +614,7 @@ impl Group { /// returned above. #[stable(feature = "proc_macro_lib2", since = "1.29.0")] pub fn stream(&self) -> TokenStream { - self.stream.clone() + TokenStream(self.0.stream()) } /// Returns the span for the delimiters of this token stream, spanning the @@ -670,7 +626,7 @@ impl Group { /// ``` #[stable(feature = "proc_macro_lib2", since = "1.29.0")] pub fn span(&self) -> Span { - Span(self.span.entire()) + Span(self.0.span()) } /// Returns the span pointing to the opening delimiter of this group. @@ -681,7 +637,7 @@ impl Group { /// ``` #[unstable(feature = "proc_macro_span", issue = "54725")] pub fn span_open(&self) -> Span { - Span(self.span.open) + Span(self.0.span_open()) } /// Returns the span pointing to the closing delimiter of this group. @@ -692,7 +648,7 @@ impl Group { /// ``` #[unstable(feature = "proc_macro_span", issue = "54725")] pub fn span_close(&self) -> Span { - Span(self.span.close) + Span(self.0.span_close()) } /// Configures the span for this `Group`'s delimiters, but not its internal @@ -703,7 +659,16 @@ impl Group { /// tokens at the level of the `Group`. #[stable(feature = "proc_macro_lib2", since = "1.29.0")] pub fn set_span(&mut self, span: Span) { - self.span = DelimSpan::from_single(span.0); + self.0.set_span(span.0); + } +} + +// NB: the bridge only provides `to_string`, implement `fmt::Display` +// based on it (the reverse of the usual relationship between the two). 
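[Editor's note] The recurring `NB` comments above invert the usual relationship: `Display` is written on top of an owned-`String` call, because stringification happens on the server side of the bridge. A minimal sketch of that shape on a made-up type; note the real file overrides the `ToString` trait directly, which is why it enables `#![feature(specialization)]`.

```rust
use std::fmt;

// Stand-in for a handle type whose only way to stringify is a server call.
struct Remote(String);

impl Remote {
    // Imagine this crossing the bridge and returning an owned String.
    fn fetch_string(&self) -> String {
        self.0.clone()
    }
}

// `Display` delegates to the owned-String call; `to_string()` then comes from
// the standard blanket impl for `Display` types.
impl fmt::Display for Remote {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(&self.fetch_string())
    }
}

fn main() {
    assert_eq!(Remote("group".to_owned()).to_string(), "group");
}
```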
+#[stable(feature = "proc_macro_lib", since = "1.15.0")] +impl ToString for Group { + fn to_string(&self) -> String { + TokenStream::from(TokenTree::from(self.clone())).to_string() } } @@ -713,7 +678,7 @@ impl Group { #[stable(feature = "proc_macro_lib2", since = "1.29.0")] impl fmt::Display for Group { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - TokenStream::from(TokenTree::from(self.clone())).fmt(f) + f.write_str(&self.to_string()) } } @@ -730,15 +695,11 @@ impl fmt::Debug for Group { /// An `Punct` is an single punctuation character like `+`, `-` or `#`. /// -/// Multicharacter operators like `+=` are represented as two instances of `Punct` with different +/// Multi-character operators like `+=` are represented as two instances of `Punct` with different /// forms of `Spacing` returned. #[stable(feature = "proc_macro_lib2", since = "1.29.0")] #[derive(Clone)] -pub struct Punct { - ch: char, - spacing: Spacing, - span: Span, -} +pub struct Punct(bridge::client::Punct); #[stable(feature = "proc_macro_lib2", since = "1.29.0")] impl !Send for Punct {} @@ -773,38 +734,43 @@ impl Punct { if !LEGAL_CHARS.contains(&ch) { panic!("unsupported character `{:?}`", ch) } - Punct { - ch: ch, - spacing: spacing, - span: Span::call_site(), - } + Punct(bridge::client::Punct::new(ch, spacing)) } /// Returns the value of this punctuation character as `char`. #[stable(feature = "proc_macro_lib2", since = "1.29.0")] pub fn as_char(&self) -> char { - self.ch + self.0.as_char() } /// Returns the spacing of this punctuation character, indicating whether it's immediately /// followed by another `Punct` in the token stream, so they can potentially be combined into - /// a multicharacter operator (`Joint`), or it's followed by some other token or whitespace + /// a multi-character operator (`Joint`), or it's followed by some other token or whitespace /// (`Alone`) so the operator has certainly ended. #[stable(feature = "proc_macro_lib2", since = "1.29.0")] pub fn spacing(&self) -> Spacing { - self.spacing + self.0.spacing() } /// Returns the span for this punctuation character. #[stable(feature = "proc_macro_lib2", since = "1.29.0")] pub fn span(&self) -> Span { - self.span + Span(self.0.span()) } /// Configure the span for this punctuation character. #[stable(feature = "proc_macro_lib2", since = "1.29.0")] pub fn set_span(&mut self, span: Span) { - self.span = span; + self.0 = self.0.with_span(span.0); + } +} + +// NB: the bridge only provides `to_string`, implement `fmt::Display` +// based on it (the reverse of the usual relationship between the two). +#[stable(feature = "proc_macro_lib", since = "1.15.0")] +impl ToString for Punct { + fn to_string(&self) -> String { + TokenStream::from(TokenTree::from(self.clone())).to_string() } } @@ -813,7 +779,7 @@ impl Punct { #[stable(feature = "proc_macro_lib2", since = "1.29.0")] impl fmt::Display for Punct { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - TokenStream::from(TokenTree::from(self.clone())).fmt(f) + f.write_str(&self.to_string()) } } @@ -831,16 +797,7 @@ impl fmt::Debug for Punct { /// An identifier (`ident`). 
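[Editor's note] Taken together, the wrapper types above can be exercised end to end from a macro author's point of view. A hedged sketch of a separate, hypothetical proc-macro crate (its `Cargo.toml` would set `proc-macro = true`) that builds `fn answer() -> u32 { 42u32 }` from the public API; every call here is part of the stable `proc_macro` surface this file forwards to `bridge::client`.

```rust
extern crate proc_macro;

use proc_macro::{Delimiter, Group, Ident, Literal, Punct, Spacing, Span, TokenStream, TokenTree};

#[proc_macro]
pub fn answer_fn(_input: TokenStream) -> TokenStream {
    // Builds `fn answer() -> u32 { 42u32 }` token by token.
    let body = TokenStream::from(TokenTree::Literal(Literal::u32_suffixed(42)));
    vec![
        TokenTree::Ident(Ident::new("fn", Span::call_site())),
        TokenTree::Ident(Ident::new("answer", Span::call_site())),
        TokenTree::Group(Group::new(Delimiter::Parenthesis, TokenStream::new())),
        TokenTree::Punct(Punct::new('-', Spacing::Joint)),
        TokenTree::Punct(Punct::new('>', Spacing::Alone)),
        TokenTree::Ident(Ident::new("u32", Span::call_site())),
        TokenTree::Group(Group::new(Delimiter::Brace, body)),
    ]
    .into_iter()
    .collect()
}
```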
#[derive(Clone)] #[stable(feature = "proc_macro_lib2", since = "1.29.0")] -pub struct Ident { - sym: Symbol, - span: Span, - is_raw: bool, -} - -#[stable(feature = "proc_macro_lib2", since = "1.29.0")] -impl !Send for Ident {} -#[stable(feature = "proc_macro_lib2", since = "1.29.0")] -impl !Sync for Ident {} +pub struct Ident(bridge::client::Ident); impl Ident { fn is_valid(string: &str) -> bool { @@ -877,7 +834,7 @@ impl Ident { if !Ident::is_valid(string) { panic!("`{:?}` is not a valid identifier", string) } - Ident::new_maybe_raw(string, span, false) + Ident(bridge::client::Ident::new(string, span.0, false)) } /// Same as `Ident::new`, but creates a raw identifier (`r#ident`). @@ -886,20 +843,29 @@ impl Ident { if !Ident::is_valid(string) { panic!("`{:?}` is not a valid identifier", string) } - Ident::new_maybe_raw(string, span, true) + Ident(bridge::client::Ident::new(string, span.0, true)) } /// Returns the span of this `Ident`, encompassing the entire string returned /// by `as_str`. #[stable(feature = "proc_macro_lib2", since = "1.29.0")] pub fn span(&self) -> Span { - self.span + Span(self.0.span()) } /// Configures the span of this `Ident`, possibly changing its hygiene context. #[stable(feature = "proc_macro_lib2", since = "1.29.0")] pub fn set_span(&mut self, span: Span) { - self.span = span; + self.0 = self.0.with_span(span.0); + } +} + +// NB: the bridge only provides `to_string`, implement `fmt::Display` +// based on it (the reverse of the usual relationship between the two). +#[stable(feature = "proc_macro_lib", since = "1.15.0")] +impl ToString for Ident { + fn to_string(&self) -> String { + TokenStream::from(TokenTree::from(self.clone())).to_string() } } @@ -908,7 +874,7 @@ impl Ident { #[stable(feature = "proc_macro_lib2", since = "1.29.0")] impl fmt::Display for Ident { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - TokenStream::from(TokenTree::from(self.clone())).fmt(f) + f.write_str(&self.to_string()) } } @@ -926,19 +892,9 @@ impl fmt::Debug for Ident { /// character (`'a'`), byte character (`b'a'`), an integer or floating point number /// with or without a suffix (`1`, `1u8`, `2.3`, `2.3f32`). /// Boolean literals like `true` and `false` do not belong here, they are `Ident`s. -// FIXME(eddyb) `Literal` should not expose internal `Debug` impls. -#[derive(Clone, Debug)] +#[derive(Clone)] #[stable(feature = "proc_macro_lib2", since = "1.29.0")] -pub struct Literal { - lit: token::Lit, - suffix: Option, - span: Span, -} - -#[stable(feature = "proc_macro_lib2", since = "1.29.0")] -impl !Send for Literal {} -#[stable(feature = "proc_macro_lib2", since = "1.29.0")] -impl !Sync for Literal {} +pub struct Literal(bridge::client::Literal); macro_rules! suffixed_int_literals { ($($name:ident => $kind:ident,)*) => ($( @@ -947,7 +903,7 @@ macro_rules! suffixed_int_literals { /// This function will create an integer like `1u32` where the integer /// value specified is the first part of the token and the integral is /// also suffixed at the end. - /// Literals created from negative numbers may not survive rountrips through + /// Literals created from negative numbers may not survive round-trips through /// `TokenStream` or strings and may be broken into two tokens (`-` and positive literal). /// /// Literals created through this method have the `Span::call_site()` @@ -955,11 +911,7 @@ macro_rules! suffixed_int_literals { /// below. 
#[stable(feature = "proc_macro_lib2", since = "1.29.0")] pub fn $name(n: $kind) -> Literal { - Literal { - lit: token::Lit::Integer(Symbol::intern(&n.to_string())), - suffix: Some(Symbol::intern(stringify!($kind))), - span: Span::call_site(), - } + Literal(bridge::client::Literal::typed_integer(&n.to_string(), stringify!($kind))) } )*) } @@ -981,11 +933,7 @@ macro_rules! unsuffixed_int_literals { /// below. #[stable(feature = "proc_macro_lib2", since = "1.29.0")] pub fn $name(n: $kind) -> Literal { - Literal { - lit: token::Lit::Integer(Symbol::intern(&n.to_string())), - suffix: None, - span: Span::call_site(), - } + Literal(bridge::client::Literal::integer(&n.to_string())) } )*) } @@ -1038,16 +986,12 @@ impl Literal { if !n.is_finite() { panic!("Invalid float literal {}", n); } - Literal { - lit: token::Lit::Float(Symbol::intern(&n.to_string())), - suffix: None, - span: Span::call_site(), - } + Literal(bridge::client::Literal::float(&n.to_string())) } /// Creates a new suffixed floating-point literal. /// - /// This consturctor will create a literal like `1.0f32` where the value + /// This constructor will create a literal like `1.0f32` where the value /// specified is the preceding part of the token and `f32` is the suffix of /// the token. This token will always be inferred to be an `f32` in the /// compiler. @@ -1063,11 +1007,7 @@ impl Literal { if !n.is_finite() { panic!("Invalid float literal {}", n); } - Literal { - lit: token::Lit::Float(Symbol::intern(&n.to_string())), - suffix: Some(Symbol::intern("f32")), - span: Span::call_site(), - } + Literal(bridge::client::Literal::f32(&n.to_string())) } /// Creates a new unsuffixed floating-point literal. @@ -1087,16 +1027,12 @@ impl Literal { if !n.is_finite() { panic!("Invalid float literal {}", n); } - Literal { - lit: token::Lit::Float(Symbol::intern(&n.to_string())), - suffix: None, - span: Span::call_site(), - } + Literal(bridge::client::Literal::float(&n.to_string())) } /// Creates a new suffixed floating-point literal. /// - /// This consturctor will create a literal like `1.0f64` where the value + /// This constructor will create a literal like `1.0f64` where the value /// specified is the preceding part of the token and `f64` is the suffix of /// the token. This token will always be inferred to be an `f64` in the /// compiler. @@ -1112,61 +1048,74 @@ impl Literal { if !n.is_finite() { panic!("Invalid float literal {}", n); } - Literal { - lit: token::Lit::Float(Symbol::intern(&n.to_string())), - suffix: Some(Symbol::intern("f64")), - span: Span::call_site(), - } + Literal(bridge::client::Literal::f64(&n.to_string())) } /// String literal. #[stable(feature = "proc_macro_lib2", since = "1.29.0")] pub fn string(string: &str) -> Literal { - let mut escaped = String::new(); - for ch in string.chars() { - escaped.extend(ch.escape_debug()); - } - Literal { - lit: token::Lit::Str_(Symbol::intern(&escaped)), - suffix: None, - span: Span::call_site(), - } + Literal(bridge::client::Literal::string(string)) } /// Character literal. #[stable(feature = "proc_macro_lib2", since = "1.29.0")] pub fn character(ch: char) -> Literal { - let mut escaped = String::new(); - escaped.extend(ch.escape_unicode()); - Literal { - lit: token::Lit::Char(Symbol::intern(&escaped)), - suffix: None, - span: Span::call_site(), - } + Literal(bridge::client::Literal::character(ch)) } /// Byte string literal. 
#[stable(feature = "proc_macro_lib2", since = "1.29.0")] pub fn byte_string(bytes: &[u8]) -> Literal { - let string = bytes.iter().cloned().flat_map(ascii::escape_default) - .map(Into::::into).collect::(); - Literal { - lit: token::Lit::ByteStr(Symbol::intern(&string)), - suffix: None, - span: Span::call_site(), - } + Literal(bridge::client::Literal::byte_string(bytes)) } /// Returns the span encompassing this literal. #[stable(feature = "proc_macro_lib2", since = "1.29.0")] pub fn span(&self) -> Span { - self.span + Span(self.0.span()) } /// Configures the span associated for this literal. #[stable(feature = "proc_macro_lib2", since = "1.29.0")] pub fn set_span(&mut self, span: Span) { - self.span = span; + self.0.set_span(span.0); + } + + /// Returns a `Span` that is a subset of `self.span()` containing only the + /// source bytes in range `range`. Returns `None` if the would-be trimmed + /// span is outside the bounds of `self`. + // FIXME(SergioBenitez): check that the byte range starts and ends at a + // UTF-8 boundary of the source. otherwise, it's likely that a panic will + // occur elsewhere when the source text is printed. + // FIXME(SergioBenitez): there is no way for the user to know what + // `self.span()` actually maps to, so this method can currently only be + // called blindly. For example, `to_string()` for the character 'c' returns + // "'\u{63}'"; there is no way for the user to know whether the source text + // was 'c' or whether it was '\u{63}'. + #[unstable(feature = "proc_macro_span", issue = "54725")] + pub fn subspan>(&self, range: R) -> Option { + // HACK(eddyb) something akin to `Option::cloned`, but for `Bound<&T>`. + fn cloned_bound(bound: Bound<&T>) -> Bound { + match bound { + Bound::Included(x) => Bound::Included(x.clone()), + Bound::Excluded(x) => Bound::Excluded(x.clone()), + Bound::Unbounded => Bound::Unbounded, + } + } + + self.0.subspan( + cloned_bound(range.start_bound()), + cloned_bound(range.end_bound()), + ).map(Span) + } +} + +// NB: the bridge only provides `to_string`, implement `fmt::Display` +// based on it (the reverse of the usual relationship between the two). +#[stable(feature = "proc_macro_lib", since = "1.15.0")] +impl ToString for Literal { + fn to_string(&self) -> String { + TokenStream::from(TokenTree::from(self.clone())).to_string() } } @@ -1175,149 +1124,14 @@ impl Literal { #[stable(feature = "proc_macro_lib2", since = "1.29.0")] impl fmt::Display for Literal { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - TokenStream::from(TokenTree::from(self.clone())).fmt(f) + f.write_str(&self.to_string()) } } -/// Permanently unstable internal implementation details of this crate. This -/// should not be used. -/// -/// These methods are used by the rest of the compiler to generate instances of -/// `TokenStream` to hand to macro definitions, as well as consume the output. -/// -/// Note that this module is also intentionally separate from the rest of the -/// crate. This allows the `#[unstable]` directive below to naturally apply to -/// all of the contents. 
-#[unstable(feature = "proc_macro_internals", issue = "27812")] -#[doc(hidden)] -pub mod __internal { - use std::cell::Cell; - use std::ptr; - - use syntax::ast; - use syntax::ext::base::ExtCtxt; - use syntax::ptr::P; - use syntax::parse::{self, ParseSess}; - use syntax::parse::token::{self, Token}; - use syntax::tokenstream; - use syntax_pos::{BytePos, Loc, DUMMY_SP}; - use syntax_pos::hygiene::{SyntaxContext, Transparency}; - - use super::{TokenStream, LexError, Span}; - - pub fn lookup_char_pos(pos: BytePos) -> Loc { - with_sess(|sess, _| sess.source_map().lookup_char_pos(pos)) - } - - pub fn new_token_stream(item: P) -> TokenStream { - let token = Token::interpolated(token::NtItem(item)); - TokenStream(tokenstream::TokenTree::Token(DUMMY_SP, token).into()) - } - - pub fn token_stream_wrap(inner: tokenstream::TokenStream) -> TokenStream { - TokenStream(inner) - } - - pub fn token_stream_parse_items(stream: TokenStream) -> Result>, LexError> { - with_sess(move |sess, _| { - let mut parser = parse::stream_to_parser(sess, stream.0); - let mut items = Vec::new(); - - while let Some(item) = try!(parser.parse_item().map_err(super::parse_to_lex_err)) { - items.push(item) - } - - Ok(items) - }) - } - - pub fn token_stream_inner(stream: TokenStream) -> tokenstream::TokenStream { - stream.0 - } - - pub trait Registry { - fn register_custom_derive(&mut self, - trait_name: &str, - expand: fn(TokenStream) -> TokenStream, - attributes: &[&'static str]); - - fn register_attr_proc_macro(&mut self, - name: &str, - expand: fn(TokenStream, TokenStream) -> TokenStream); - - fn register_bang_proc_macro(&mut self, - name: &str, - expand: fn(TokenStream) -> TokenStream); - } - - #[derive(Clone, Copy)] - pub struct ProcMacroData { - pub def_site: Span, - pub call_site: Span, - } - - #[derive(Clone, Copy)] - struct ProcMacroSess { - parse_sess: *const ParseSess, - data: ProcMacroData, - } - - // Emulate scoped_thread_local!() here essentially - thread_local! { - static CURRENT_SESS: Cell = Cell::new(ProcMacroSess { - parse_sess: ptr::null(), - data: ProcMacroData { def_site: Span(DUMMY_SP), call_site: Span(DUMMY_SP) }, - }); - } - - pub fn set_sess(cx: &ExtCtxt, f: F) -> R - where F: FnOnce() -> R - { - struct Reset { prev: ProcMacroSess } - - impl Drop for Reset { - fn drop(&mut self) { - CURRENT_SESS.with(|p| p.set(self.prev)); - } - } - - CURRENT_SESS.with(|p| { - let _reset = Reset { prev: p.get() }; - - // No way to determine def location for a proc macro right now, so use call location. - let location = cx.current_expansion.mark.expn_info().unwrap().call_site; - let to_span = |transparency| Span(location.with_ctxt( - SyntaxContext::empty().apply_mark_with_transparency(cx.current_expansion.mark, - transparency)) - ); - p.set(ProcMacroSess { - parse_sess: cx.parse_sess, - data: ProcMacroData { - def_site: to_span(Transparency::Opaque), - call_site: to_span(Transparency::Transparent), - }, - }); - f() - }) - } - - pub fn in_sess() -> bool - { - !CURRENT_SESS.with(|sess| sess.get()).parse_sess.is_null() - } - - pub fn with_sess(f: F) -> R - where F: FnOnce(&ParseSess, &ProcMacroData) -> R - { - let sess = CURRENT_SESS.with(|sess| sess.get()); - if sess.parse_sess.is_null() { - panic!("procedural macro API is used outside of a procedural macro"); - } - f(unsafe { &*sess.parse_sess }, &sess.data) +#[stable(feature = "proc_macro_lib2", since = "1.29.0")] +impl fmt::Debug for Literal { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + // FIXME(eddyb) `Literal` should not expose internal `Debug` impls. 
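The removed `__internal` module emulates `scoped_thread_local!` with a thread-local `Cell` plus a `Drop` guard that restores the previous value on the way out. A stripped-down sketch of that pattern, using a plain `u32` payload in place of the parse session:

```rust
use std::cell::Cell;

// Stash a value in a thread-local for the duration of a closure and restore
// the previous value afterwards, even if the closure panics.
thread_local! {
    static CURRENT: Cell<Option<u32>> = Cell::new(None);
}

fn set_current<R>(value: u32, f: impl FnOnce() -> R) -> R {
    struct Reset {
        prev: Option<u32>,
    }
    impl Drop for Reset {
        fn drop(&mut self) {
            CURRENT.with(|c| c.set(self.prev));
        }
    }

    CURRENT.with(|c| {
        let _reset = Reset { prev: c.replace(Some(value)) };
        f()
    })
}

fn with_current<R>(f: impl FnOnce(u32) -> R) -> R {
    let value = CURRENT.with(|c| c.get()).expect("used outside of `set_current`");
    f(value)
}

fn main() {
    set_current(42, || with_current(|v| assert_eq!(v, 42)));
}
```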
+ self.0.fmt(f) } } - -fn parse_to_lex_err(mut err: DiagnosticBuilder) -> LexError { - err.cancel(); - LexError { _inner: () } -} diff --git a/src/libproc_macro/rustc.rs b/src/libproc_macro/rustc.rs deleted file mode 100644 index 3ce02d1afb..0000000000 --- a/src/libproc_macro/rustc.rs +++ /dev/null @@ -1,283 +0,0 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use {Delimiter, Level, Spacing, Span, __internal}; -use {Group, Ident, Literal, Punct, TokenTree}; - -use rustc_errors as errors; -use syntax::ast; -use syntax::parse::lexer::comments; -use syntax::parse::token; -use syntax::tokenstream; -use syntax_pos::symbol::{keywords, Symbol}; - -impl Ident { - pub(crate) fn new_maybe_raw(string: &str, span: Span, is_raw: bool) -> Ident { - let sym = Symbol::intern(string); - if is_raw - && (sym == keywords::Underscore.name() - || ast::Ident::with_empty_ctxt(sym).is_path_segment_keyword()) - { - panic!("`{:?}` is not a valid raw identifier", string) - } - Ident { sym, span, is_raw } - } -} - -impl Delimiter { - pub(crate) fn from_internal(delim: token::DelimToken) -> Delimiter { - match delim { - token::Paren => Delimiter::Parenthesis, - token::Brace => Delimiter::Brace, - token::Bracket => Delimiter::Bracket, - token::NoDelim => Delimiter::None, - } - } - - pub(crate) fn to_internal(self) -> token::DelimToken { - match self { - Delimiter::Parenthesis => token::Paren, - Delimiter::Brace => token::Brace, - Delimiter::Bracket => token::Bracket, - Delimiter::None => token::NoDelim, - } - } -} - -impl TokenTree { - pub(crate) fn from_internal( - stream: tokenstream::TokenStream, - stack: &mut Vec, - ) -> TokenTree { - use syntax::parse::token::*; - - let (tree, is_joint) = stream.as_tree(); - let (span, token) = match tree { - tokenstream::TokenTree::Token(span, token) => (span, token), - tokenstream::TokenTree::Delimited(span, delimed) => { - let delimiter = Delimiter::from_internal(delimed.delim); - let mut g = Group::new(delimiter, ::TokenStream(delimed.tts.into())); - g.span = span; - return g.into(); - } - }; - - let op_kind = if is_joint { - Spacing::Joint - } else { - Spacing::Alone - }; - macro_rules! tt { - ($e:expr) => {{ - let mut x = TokenTree::from($e); - x.set_span(Span(span)); - x - }}; - } - macro_rules! 
op { - ($a:expr) => { - tt!(Punct::new($a, op_kind)) - }; - ($a:expr, $b:expr) => {{ - stack.push(tt!(Punct::new($b, op_kind))); - tt!(Punct::new($a, Spacing::Joint)) - }}; - ($a:expr, $b:expr, $c:expr) => {{ - stack.push(tt!(Punct::new($c, op_kind))); - stack.push(tt!(Punct::new($b, Spacing::Joint))); - tt!(Punct::new($a, Spacing::Joint)) - }}; - } - - match token { - Eq => op!('='), - Lt => op!('<'), - Le => op!('<', '='), - EqEq => op!('=', '='), - Ne => op!('!', '='), - Ge => op!('>', '='), - Gt => op!('>'), - AndAnd => op!('&', '&'), - OrOr => op!('|', '|'), - Not => op!('!'), - Tilde => op!('~'), - BinOp(Plus) => op!('+'), - BinOp(Minus) => op!('-'), - BinOp(Star) => op!('*'), - BinOp(Slash) => op!('/'), - BinOp(Percent) => op!('%'), - BinOp(Caret) => op!('^'), - BinOp(And) => op!('&'), - BinOp(Or) => op!('|'), - BinOp(Shl) => op!('<', '<'), - BinOp(Shr) => op!('>', '>'), - BinOpEq(Plus) => op!('+', '='), - BinOpEq(Minus) => op!('-', '='), - BinOpEq(Star) => op!('*', '='), - BinOpEq(Slash) => op!('/', '='), - BinOpEq(Percent) => op!('%', '='), - BinOpEq(Caret) => op!('^', '='), - BinOpEq(And) => op!('&', '='), - BinOpEq(Or) => op!('|', '='), - BinOpEq(Shl) => op!('<', '<', '='), - BinOpEq(Shr) => op!('>', '>', '='), - At => op!('@'), - Dot => op!('.'), - DotDot => op!('.', '.'), - DotDotDot => op!('.', '.', '.'), - DotDotEq => op!('.', '.', '='), - Comma => op!(','), - Semi => op!(';'), - Colon => op!(':'), - ModSep => op!(':', ':'), - RArrow => op!('-', '>'), - LArrow => op!('<', '-'), - FatArrow => op!('=', '>'), - Pound => op!('#'), - Dollar => op!('$'), - Question => op!('?'), - SingleQuote => op!('\''), - - Ident(ident, false) => tt!(self::Ident::new(&ident.as_str(), Span(span))), - Ident(ident, true) => tt!(self::Ident::new_raw(&ident.as_str(), Span(span))), - Lifetime(ident) => { - let ident = ident.without_first_quote(); - stack.push(tt!(self::Ident::new(&ident.as_str(), Span(span)))); - tt!(Punct::new('\'', Spacing::Joint)) - } - Literal(lit, suffix) => tt!(self::Literal { - lit, - suffix, - span: Span(span) - }), - DocComment(c) => { - let style = comments::doc_comment_style(&c.as_str()); - let stripped = comments::strip_doc_comment_decoration(&c.as_str()); - let stream = vec![ - tt!(self::Ident::new("doc", Span(span))), - tt!(Punct::new('=', Spacing::Alone)), - tt!(self::Literal::string(&stripped)), - ].into_iter() - .collect(); - stack.push(tt!(Group::new(Delimiter::Bracket, stream))); - if style == ast::AttrStyle::Inner { - stack.push(tt!(Punct::new('!', Spacing::Alone))); - } - tt!(Punct::new('#', Spacing::Alone)) - } - - Interpolated(_) => __internal::with_sess(|sess, _| { - let tts = token.interpolated_to_tokenstream(sess, span); - tt!(Group::new(Delimiter::None, ::TokenStream(tts))) - }), - - DotEq => op!('.', '='), - OpenDelim(..) | CloseDelim(..) => unreachable!(), - Whitespace | Comment | Shebang(..) 
| Eof => unreachable!(), - } - } - - pub(crate) fn to_internal(self) -> tokenstream::TokenStream { - use syntax::parse::token::*; - use syntax::tokenstream::{Delimited, TokenTree}; - - let (ch, kind, span) = match self { - self::TokenTree::Punct(tt) => (tt.as_char(), tt.spacing(), tt.span()), - self::TokenTree::Group(tt) => { - return TokenTree::Delimited( - tt.span, - Delimited { - delim: tt.delimiter.to_internal(), - tts: tt.stream.0.into(), - }, - ).into(); - } - self::TokenTree::Ident(tt) => { - let token = Ident(ast::Ident::new(tt.sym, tt.span.0), tt.is_raw); - return TokenTree::Token(tt.span.0, token).into(); - } - self::TokenTree::Literal(self::Literal { - lit: Lit::Integer(ref a), - suffix, - span, - }) - if a.as_str().starts_with("-") => - { - let minus = BinOp(BinOpToken::Minus); - let integer = Symbol::intern(&a.as_str()[1..]); - let integer = Literal(Lit::Integer(integer), suffix); - let a = TokenTree::Token(span.0, minus); - let b = TokenTree::Token(span.0, integer); - return vec![a, b].into_iter().collect(); - } - self::TokenTree::Literal(self::Literal { - lit: Lit::Float(ref a), - suffix, - span, - }) - if a.as_str().starts_with("-") => - { - let minus = BinOp(BinOpToken::Minus); - let float = Symbol::intern(&a.as_str()[1..]); - let float = Literal(Lit::Float(float), suffix); - let a = TokenTree::Token(span.0, minus); - let b = TokenTree::Token(span.0, float); - return vec![a, b].into_iter().collect(); - } - self::TokenTree::Literal(tt) => { - let token = Literal(tt.lit, tt.suffix); - return TokenTree::Token(tt.span.0, token).into(); - } - }; - - let token = match ch { - '=' => Eq, - '<' => Lt, - '>' => Gt, - '!' => Not, - '~' => Tilde, - '+' => BinOp(Plus), - '-' => BinOp(Minus), - '*' => BinOp(Star), - '/' => BinOp(Slash), - '%' => BinOp(Percent), - '^' => BinOp(Caret), - '&' => BinOp(And), - '|' => BinOp(Or), - '@' => At, - '.' => Dot, - ',' => Comma, - ';' => Semi, - ':' => Colon, - '#' => Pound, - '$' => Dollar, - '?' => Question, - '\'' => SingleQuote, - _ => unreachable!(), - }; - - let tree = TokenTree::Token(span.0, token); - match kind { - Spacing::Alone => tree.into(), - Spacing::Joint => tree.joint(), - } - } -} - -impl Level { - pub(crate) fn to_internal(self) -> errors::Level { - match self { - Level::Error => errors::Level::Error, - Level::Warning => errors::Level::Warning, - Level::Note => errors::Level::Note, - Level::Help => errors::Level::Help, - } - } -} diff --git a/src/librustc/Cargo.toml b/src/librustc/Cargo.toml index d0ec8640ce..3316735de6 100644 --- a/src/librustc/Cargo.toml +++ b/src/librustc/Cargo.toml @@ -18,7 +18,6 @@ lazy_static = "1.0.0" scoped-tls = { version = "0.1.1", features = ["nightly"] } log = { version = "0.4", features = ["release_max_level_info", "std"] } polonius-engine = "0.5.0" -proc_macro = { path = "../libproc_macro" } rustc-rayon = "0.1.1" rustc-rayon-core = "0.1.1" rustc_apfloat = { path = "../librustc_apfloat" } diff --git a/src/librustc/README.md b/src/librustc/README.md index 9909ff91a1..c0e5c542bd 100644 --- a/src/librustc/README.md +++ b/src/librustc/README.md @@ -1,3 +1,3 @@ For more information about how rustc works, see the [rustc guide]. 
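The deleted `rustc.rs` above splits compound operators into per-character `Punct`s, marking every character but the last as `Joint` so the pieces can later be glued back together. A standalone sketch of that splitting strategy; the local `Spacing` enum and `split_op` helper are illustrative only.

```rust
// One entry per character; everything but the final character is `Joint`,
// and the final character takes whatever spacing the whole operator had.
#[derive(Debug, PartialEq, Clone, Copy)]
enum Spacing {
    Alone,
    Joint,
}

fn split_op(op: &str, final_spacing: Spacing) -> Vec<(char, Spacing)> {
    let mut out: Vec<(char, Spacing)> = op.chars().map(|c| (c, Spacing::Joint)).collect();
    if let Some(last) = out.last_mut() {
        last.1 = final_spacing;
    }
    out
}

fn main() {
    assert_eq!(
        split_op("<<=", Spacing::Alone),
        vec![('<', Spacing::Joint), ('<', Spacing::Joint), ('=', Spacing::Alone)]
    );
}
```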
-[rustc guide]: https://rust-lang-nursery.github.io/rustc-guide/ +[rustc guide]: https://rust-lang.github.io/rustc-guide/ diff --git a/src/librustc/cfg/graphviz.rs b/src/librustc/cfg/graphviz.rs index cc4f3f95d0..650aa39114 100644 --- a/src/librustc/cfg/graphviz.rs +++ b/src/librustc/cfg/graphviz.rs @@ -106,8 +106,7 @@ impl<'a> dot::GraphWalk<'a> for &'a cfg::CFG { type Node = Node<'a>; type Edge = Edge<'a>; fn nodes(&'a self) -> dot::Nodes<'a, Node<'a>> { - let mut v = Vec::new(); - self.graph.each_node(|i, nd| { v.push((i, nd)); true }); + let v: Vec<_> = self.graph.enumerated_nodes().collect(); v.into() } fn edges(&'a self) -> dot::Edges<'a, Edge<'a>> { diff --git a/src/librustc/dep_graph/README.md b/src/librustc/dep_graph/README.md index f1f383d7ad..91a06e452e 100644 --- a/src/librustc/dep_graph/README.md +++ b/src/librustc/dep_graph/README.md @@ -1,4 +1,4 @@ To learn more about how dependency tracking works in rustc, see the [rustc guide]. -[rustc guide]: https://rust-lang-nursery.github.io/rustc-guide/query.html +[rustc guide]: https://rust-lang.github.io/rustc-guide/query.html diff --git a/src/librustc/dep_graph/cgu_reuse_tracker.rs b/src/librustc/dep_graph/cgu_reuse_tracker.rs index 99fc020bbe..0392d32989 100644 --- a/src/librustc/dep_graph/cgu_reuse_tracker.rs +++ b/src/librustc/dep_graph/cgu_reuse_tracker.rs @@ -9,7 +9,7 @@ // except according to those terms. //! Some facilities for tracking how codegen-units are reused during incremental -//! compilition. This is used for incremental compiliation tests and debug +//! compilation. This is used for incremental compilation tests and debug //! output. use session::Session; diff --git a/src/librustc/dep_graph/dep_node.rs b/src/librustc/dep_graph/dep_node.rs index 4d6d3bd56f..388bbc52c3 100644 --- a/src/librustc/dep_graph/dep_node.rs +++ b/src/librustc/dep_graph/dep_node.rs @@ -63,7 +63,7 @@ use mir::interpret::GlobalId; use hir::def_id::{CrateNum, DefId, DefIndex, CRATE_DEF_INDEX}; use hir::map::DefPathHash; -use hir::{HirId, ItemLocalId}; +use hir::HirId; use ich::{Fingerprint, StableHashingContext}; use rustc_data_structures::stable_hasher::{StableHasher, HashStable}; @@ -530,6 +530,7 @@ define_dep_nodes!( <'tcx> [] UsedTraitImports(DefId), [] HasTypeckTables(DefId), [] ConstEval { param_env: ParamEnvAnd<'tcx, GlobalId<'tcx>> }, + [] ConstEvalRaw { param_env: ParamEnvAnd<'tcx, GlobalId<'tcx>> }, [] CheckMatch(DefId), [] SymbolName(DefId), [] InstanceSymbolName { instance: Instance<'tcx> }, @@ -595,7 +596,7 @@ define_dep_nodes!( <'tcx> [] ReachableNonGenerics(CrateNum), [] NativeLibraries(CrateNum), [] PluginRegistrarFn(CrateNum), - [] DeriveRegistrarFn(CrateNum), + [] ProcMacroDeclsStatic(CrateNum), [input] CrateDisambiguator(CrateNum), [input] CrateHash(CrateNum), [input] OriginalCrateName(CrateNum), @@ -789,11 +790,11 @@ impl<'a, 'gcx: 'tcx + 'a, 'tcx: 'a> DepNodeParams<'a, 'gcx, 'tcx> for HirId { fn to_fingerprint(&self, tcx: TyCtxt<'_, '_, '_>) -> Fingerprint { let HirId { owner, - local_id: ItemLocalId(local_id), + local_id, } = *self; let def_path_hash = tcx.def_path_hash(DefId::local(owner)); - let local_id = Fingerprint::from_smaller_hash(local_id as u64); + let local_id = Fingerprint::from_smaller_hash(local_id.as_u32().into()); def_path_hash.0.combine(local_id) } diff --git a/src/librustc/dep_graph/graph.rs b/src/librustc/dep_graph/graph.rs index ddc5676c74..4c94c993ab 100644 --- a/src/librustc/dep_graph/graph.rs +++ b/src/librustc/dep_graph/graph.rs @@ -101,11 +101,11 @@ impl DepGraph { DepGraph { data: 
Some(Lrc::new(DepGraphData { previous_work_products: prev_work_products, - dep_node_debug: Lock::new(Default::default()), + dep_node_debug: Default::default(), current: Lock::new(CurrentDepGraph::new()), previous: prev_graph, colors: Lock::new(DepNodeColorMap::new(prev_graph_node_count)), - loaded_from_cache: Lock::new(Default::default()), + loaded_from_cache: Default::default(), })), fingerprints: Lrc::new(Lock::new(fingerprints)), } @@ -195,7 +195,7 @@ impl DepGraph { /// - If you need 3+ arguments, use a tuple for the /// `arg` parameter. /// - /// [rustc guide]: https://rust-lang-nursery.github.io/rustc-guide/incremental-compilation.html + /// [rustc guide]: https://rust-lang.github.io/rustc-guide/incremental-compilation.html pub fn with_task<'gcx, C, A, R>(&self, key: DepNode, cx: C, diff --git a/src/librustc/diagnostics.rs b/src/librustc/diagnostics.rs index 1d21a5cf79..96590c1fc7 100644 --- a/src/librustc/diagnostics.rs +++ b/src/librustc/diagnostics.rs @@ -2134,7 +2134,7 @@ static X: u32 = 42; register_diagnostics! { -// E0006 // merged with E0005 +// E0006, // merged with E0005 // E0101, // replaced with E0282 // E0102, // replaced with E0282 // E0134, @@ -2183,9 +2183,7 @@ register_diagnostics! { E0657, // `impl Trait` can only capture lifetimes bound at the fn level E0687, // in-band lifetimes cannot be used in `fn`/`Fn` syntax E0688, // in-band lifetimes cannot be mixed with explicit lifetime binders - E0697, // closures cannot be static - E0707, // multiple elided lifetimes used in arguments of `async fn` E0708, // `async` non-`move` closures with arguments are not currently supported E0709, // multiple different lifetimes used in arguments of `async fn` diff --git a/src/librustc/hir/def.rs b/src/librustc/hir/def.rs index fc51d1de82..50922ee601 100644 --- a/src/librustc/hir/def.rs +++ b/src/librustc/hir/def.rs @@ -36,7 +36,7 @@ pub enum NonMacroAttrKind { Tool, /// Single-segment custom attribute registered by a derive macro (`#[serde(default)]`). DeriveHelper, - /// Single-segment custom attriubte registered by a legacy plugin (`register_attribute`). + /// Single-segment custom attribute registered by a legacy plugin (`register_attribute`). LegacyPluginHelper, /// Single-segment custom attribute not registered in any way (`#[my_attr]`). 
Custom, diff --git a/src/librustc/hir/intravisit.rs b/src/librustc/hir/intravisit.rs index dcc0f8545e..d9963f23a1 100644 --- a/src/librustc/hir/intravisit.rs +++ b/src/librustc/hir/intravisit.rs @@ -49,7 +49,6 @@ use hir::map::{self, Map}; use super::itemlikevisit::DeepVisitor; use std::cmp; -use std::u32; #[derive(Copy, Clone)] pub enum FnKind<'a> { @@ -1152,8 +1151,8 @@ pub struct IdRange { impl IdRange { pub fn max() -> IdRange { IdRange { - min: NodeId::from_u32(u32::MAX), - max: NodeId::from_u32(u32::MIN), + min: NodeId::MAX, + max: NodeId::from_u32(0), } } diff --git a/src/librustc/hir/lowering.rs b/src/librustc/hir/lowering.rs index 8576df976b..dc8baa112b 100644 --- a/src/librustc/hir/lowering.rs +++ b/src/librustc/hir/lowering.rs @@ -67,7 +67,6 @@ use syntax::ast; use syntax::ast::*; use syntax::errors; use syntax::ext::hygiene::{Mark, SyntaxContext}; -use syntax::feature_gate::{emit_feature_err, GateIssue}; use syntax::print::pprust; use syntax::ptr::P; use syntax::source_map::{self, respan, CompilerDesugaringKind, Spanned}; @@ -244,9 +243,9 @@ pub fn lower_crate( loop_scopes: Vec::new(), is_in_loop_condition: false, anonymous_lifetime_mode: AnonymousLifetimeMode::PassThrough, - type_def_lifetime_params: DefIdMap(), + type_def_lifetime_params: Default::default(), current_hir_id_owner: vec![(CRATE_DEF_INDEX, 0)], - item_local_id_counters: NodeMap(), + item_local_id_counters: Default::default(), node_id_to_hir_id: IndexVec::new(), is_generator: false, is_in_trait_impl: false, @@ -588,7 +587,7 @@ impl<'a> LoweringContext<'a> { *local_id_counter += 1; hir::HirId { owner: def_index, - local_id: hir::ItemLocalId(local_id), + local_id: hir::ItemLocalId::from_u32(local_id), } }) } @@ -616,7 +615,7 @@ impl<'a> LoweringContext<'a> { hir::HirId { owner: def_index, - local_id: hir::ItemLocalId(local_id), + local_id: hir::ItemLocalId::from_u32(local_id), } }) } @@ -1062,8 +1061,7 @@ impl<'a> LoweringContext<'a> { attrs .iter() .map(|a| self.lower_attr(a)) - .collect::>() - .into() + .collect() } fn lower_attr(&mut self, attr: &Attribute) -> Attribute { @@ -1169,7 +1167,7 @@ impl<'a> LoweringContext<'a> { hir::TyKind::BareFn(P(hir::BareFnTy { generic_params: this.lower_generic_params( &f.generic_params, - &NodeMap(), + &NodeMap::default(), ImplTraitContext::disallowed(), ), unsafety: this.lower_unsafety(f.unsafety), @@ -2472,7 +2470,7 @@ impl<'a> LoweringContext<'a> { // FIXME: This could probably be done with less rightward drift. Also looks like two control // paths where report_error is called are also the only paths that advance to after // the match statement, so the error reporting could probably just be moved there. 
- let mut add_bounds: NodeMap> = NodeMap(); + let mut add_bounds: NodeMap> = Default::default(); for pred in &generics.where_clause.predicates { if let WherePredicate::BoundPredicate(ref bound_pred) = *pred { 'next_bound: for bound in &bound_pred.bounds { @@ -2557,7 +2555,7 @@ impl<'a> LoweringContext<'a> { hir::WherePredicate::BoundPredicate(hir::WhereBoundPredicate { bound_generic_params: this.lower_generic_params( bound_generic_params, - &NodeMap(), + &NodeMap::default(), ImplTraitContext::disallowed(), ), bounded_ty: this.lower_ty(bounded_ty, ImplTraitContext::disallowed()), @@ -2641,8 +2639,11 @@ impl<'a> LoweringContext<'a> { p: &PolyTraitRef, mut itctx: ImplTraitContext<'_>, ) -> hir::PolyTraitRef { - let bound_generic_params = - self.lower_generic_params(&p.bound_generic_params, &NodeMap(), itctx.reborrow()); + let bound_generic_params = self.lower_generic_params( + &p.bound_generic_params, + &NodeMap::default(), + itctx.reborrow(), + ); let trait_ref = self.with_parent_impl_lifetime_defs( &bound_generic_params, |this| this.lower_trait_ref(&p.trait_ref, itctx), @@ -3626,7 +3627,6 @@ impl<'a> LoweringContext<'a> { ParamMode::Optional, ImplTraitContext::disallowed(), ); - self.check_self_struct_ctor_feature(&qpath); hir::PatKind::TupleStruct( qpath, pats.iter().map(|x| self.lower_pat(x)).collect(), @@ -3641,7 +3641,6 @@ impl<'a> LoweringContext<'a> { ParamMode::Optional, ImplTraitContext::disallowed(), ); - self.check_self_struct_ctor_feature(&qpath); hir::PatKind::Path(qpath) } PatKind::Struct(ref path, ref fields, etc) => { @@ -3761,7 +3760,7 @@ impl<'a> LoweringContext<'a> { let ohs = P(self.lower_expr(ohs)); hir::ExprKind::Unary(op, ohs) } - ExprKind::Lit(ref l) => hir::ExprKind::Lit(P((**l).clone())), + ExprKind::Lit(ref l) => hir::ExprKind::Lit(P((*l).clone())), ExprKind::Cast(ref expr, ref ty) => { let expr = P(self.lower_expr(expr)); hir::ExprKind::Cast(expr, self.lower_ty(ty, ImplTraitContext::disallowed())) @@ -4037,7 +4036,6 @@ impl<'a> LoweringContext<'a> { ParamMode::Optional, ImplTraitContext::disallowed(), ); - self.check_self_struct_ctor_feature(&qpath); hir::ExprKind::Path(qpath) } ExprKind::Break(opt_label, ref opt_expr) => { @@ -4932,23 +4930,24 @@ impl<'a> LoweringContext<'a> { let node = match qpath { hir::QPath::Resolved(None, path) => { // Turn trait object paths into `TyKind::TraitObject` instead. - if let Def::Trait(_) = path.def { - let principal = hir::PolyTraitRef { - bound_generic_params: hir::HirVec::new(), - trait_ref: hir::TraitRef { - path: path.and_then(|path| path), - ref_id: id.node_id, - hir_ref_id: id.hir_id, - }, - span, - }; + match path.def { + Def::Trait(_) | Def::TraitAlias(_) => { + let principal = hir::PolyTraitRef { + bound_generic_params: hir::HirVec::new(), + trait_ref: hir::TraitRef { + path: path.and_then(|path| path), + ref_id: id.node_id, + hir_ref_id: id.hir_id, + }, + span, + }; - // The original ID is taken by the `PolyTraitRef`, - // so the `Ty` itself needs a different one. - id = self.next_id(); - hir::TyKind::TraitObject(hir_vec![principal], self.elided_dyn_bound(span)) - } else { - hir::TyKind::Path(hir::QPath::Resolved(None, path)) + // The original ID is taken by the `PolyTraitRef`, + // so the `Ty` itself needs a different one. 
+ id = self.next_id(); + hir::TyKind::TraitObject(hir_vec![principal], self.elided_dyn_bound(span)) + } + _ => hir::TyKind::Path(hir::QPath::Resolved(None, path)), } } _ => hir::TyKind::Path(qpath), @@ -5099,18 +5098,6 @@ impl<'a> LoweringContext<'a> { ThinVec::new())); P(self.expr_call(e.span, from_err, hir_vec![e])) } - - fn check_self_struct_ctor_feature(&self, qp: &hir::QPath) { - if let hir::QPath::Resolved(_, ref p) = qp { - if p.segments.len() == 1 && - p.segments[0].ident.name == keywords::SelfType.name() && - !self.sess.features_untracked().self_struct_ctor { - emit_feature_err(&self.sess.parse_sess, "self_struct_ctor", - p.span, GateIssue::Language, - "`Self` struct constructors are unstable"); - } - } - } } fn body_ids(bodies: &BTreeMap) -> Vec { diff --git a/src/librustc/hir/map/blocks.rs b/src/librustc/hir/map/blocks.rs index 69706aabcb..1ab1c7d3fc 100644 --- a/src/librustc/hir/map/blocks.rs +++ b/src/librustc/hir/map/blocks.rs @@ -43,7 +43,7 @@ pub struct FnLikeNode<'a> { node: Node<'a> } /// MaybeFnLike wraps a method that indicates if an object /// corresponds to some FnLikeNode. -pub trait MaybeFnLike { fn is_fn_like(&self) -> bool; } +trait MaybeFnLike { fn is_fn_like(&self) -> bool; } impl MaybeFnLike for ast::Item { fn is_fn_like(&self) -> bool { diff --git a/src/librustc/hir/map/definitions.rs b/src/librustc/hir/map/definitions.rs index dd68915408..eb9bd183fd 100644 --- a/src/librustc/hir/map/definitions.rs +++ b/src/librustc/hir/map/definitions.rs @@ -36,6 +36,7 @@ use util::nodemap::NodeMap; /// Internally the DefPathTable holds a tree of DefKeys, where each DefKey /// stores the DefIndex of its parent. /// There is one DefPathTable for each crate. +#[derive(Default)] pub struct DefPathTable { index_to_key: [Vec; 2], def_path_hashes: [Vec; 2], @@ -153,7 +154,7 @@ impl Decodable for DefPathTable { /// The definition table containing node definitions. /// It holds the DefPathTable for local DefIds/DefPaths and it also stores a /// mapping from NodeIds to local DefIds. -#[derive(Clone)] +#[derive(Clone, Default)] pub struct Definitions { table: DefPathTable, node_to_def_index: NodeMap, @@ -412,20 +413,8 @@ impl Definitions { /// ascending order. /// /// FIXME: there is probably a better place to put this comment. - pub fn new() -> Definitions { - Definitions { - table: DefPathTable { - index_to_key: [vec![], vec![]], - def_path_hashes: [vec![], vec![]], - }, - node_to_def_index: NodeMap(), - def_index_to_node: [vec![], vec![]], - node_to_hir_id: IndexVec::new(), - parent_modules_of_macro_defs: Default::default(), - expansions_that_defined: Default::default(), - next_disambiguator: Default::default(), - def_index_to_span: Default::default(), - } + pub fn new() -> Self { + Self::default() } pub fn def_path_table(&self) -> &DefPathTable { diff --git a/src/librustc/hir/map/hir_id_validator.rs b/src/librustc/hir/map/hir_id_validator.rs index 896a6163eb..ac4119dc37 100644 --- a/src/librustc/hir/map/hir_id_validator.rs +++ b/src/librustc/hir/map/hir_id_validator.rs @@ -101,7 +101,7 @@ impl<'a, 'hir: 'a> HirIdValidator<'a, 'hir> { if max != self.hir_ids_seen.len() - 1 { // Collect the missing ItemLocalIds let missing: Vec<_> = (0 .. 
max as u32 + 1) - .filter(|&i| !self.hir_ids_seen.contains_key(&ItemLocalId(i))) + .filter(|&i| !self.hir_ids_seen.contains_key(&ItemLocalId::from_u32(i))) .collect(); // Try to map those to something more useful @@ -110,7 +110,7 @@ impl<'a, 'hir: 'a> HirIdValidator<'a, 'hir> { for local_id in missing { let hir_id = HirId { owner: owner_def_index, - local_id: ItemLocalId(local_id as u32), + local_id: ItemLocalId::from_u32(local_id), }; trace!("missing hir id {:#?}", hir_id); @@ -124,7 +124,7 @@ impl<'a, 'hir: 'a> HirIdValidator<'a, 'hir> { .enumerate() .find(|&(_, &entry)| hir_id == entry) .expect("no node_to_hir_id entry"); - let node_id = NodeId::new(node_id); + let node_id = NodeId::from_usize(node_id); missing_items.push(format!("[local_id: {}, node:{}]", local_id, self.hir_map.node_to_string(node_id))); diff --git a/src/librustc/hir/map/mod.rs b/src/librustc/hir/map/mod.rs index 9e7b26baa1..ef777abfbc 100644 --- a/src/librustc/hir/map/mod.rs +++ b/src/librustc/hir/map/mod.rs @@ -301,9 +301,7 @@ impl<'hir> Map<'hir> { ItemKind::Struct(..) => Some(Def::Struct(def_id())), ItemKind::Union(..) => Some(Def::Union(def_id())), ItemKind::Trait(..) => Some(Def::Trait(def_id())), - ItemKind::TraitAlias(..) => { - bug!("trait aliases are not yet implemented (see issue #41517)") - }, + ItemKind::TraitAlias(..) => Some(Def::TraitAlias(def_id())), ItemKind::ExternCrate(_) | ItemKind::Use(..) | ItemKind::ForeignMod(..) | @@ -1254,7 +1252,7 @@ fn node_id_to_string(map: &Map<'_>, id: NodeId, include_id: bool) -> String { Some(Node::MacroDef(_)) => { format!("macro {}{}", path_str(), id_str) } - Some(Node::Crate) => format!("root_crate"), + Some(Node::Crate) => String::from("root_crate"), None => format!("unknown node{}", id_str), } } diff --git a/src/librustc/hir/mod.rs b/src/librustc/hir/mod.rs index a2095ff40c..1674320165 100644 --- a/src/librustc/hir/mod.rs +++ b/src/librustc/hir/mod.rs @@ -27,7 +27,7 @@ use syntax_pos::{Span, DUMMY_SP, symbol::InternedString}; use syntax::source_map::{self, Spanned}; use rustc_target::spec::abi::Abi; use syntax::ast::{self, CrateSugar, Ident, Name, NodeId, DUMMY_NODE_ID, AsmDialect}; -use syntax::ast::{Attribute, Lit, StrStyle, FloatTy, IntTy, UintTy, MetaItem}; +use syntax::ast::{Attribute, Lit, StrStyle, FloatTy, IntTy, UintTy}; use syntax::attr::InlineAttr; use syntax::ext::hygiene::SyntaxContext; use syntax::ptr::P; @@ -37,7 +37,6 @@ use syntax::util::parser::ExprPrecedence; use ty::AdtKind; use ty::query::Providers; -use rustc_data_structures::indexed_vec; use rustc_data_structures::sync::{ParallelIterator, par_iter, Send, Sync, scope}; use rustc_data_structures::thin_vec::ThinVec; @@ -58,7 +57,6 @@ macro_rules! hir_vec { ($($x:expr),*) => ( $crate::hir::HirVec::from(vec![$($x),*]) ); - ($($x:expr,)*) => (hir_vec![$($x),*]) } pub mod check_attr; @@ -121,40 +119,28 @@ impl serialize::UseSpecializedDecodable for HirId { } } - -/// An `ItemLocalId` uniquely identifies something within a given "item-like", -/// that is within a hir::Item, hir::TraitItem, or hir::ImplItem. There is no -/// guarantee that the numerical value of a given `ItemLocalId` corresponds to -/// the node's position within the owning item in any way, but there is a -/// guarantee that the `LocalItemId`s within an owner occupy a dense range of -/// integers starting at zero, so a mapping that maps all or most nodes within -/// an "item-like" to something else can be implement by a `Vec` instead of a -/// tree or hash map. 
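The hand-written `ItemLocalId` wrapper and its `Idx` impl are folded into `newtype_index!` in the hunk below. A simplified standalone sketch of the pattern the macro stands in for; the `Idx` trait and `IndexVec` here are hypothetical stand-ins for the real `rustc_data_structures` types.

```rust
// A `u32` newtype plus the conversions that let it key a dense, Vec-backed map.
trait Idx: Copy {
    fn new(idx: usize) -> Self;
    fn index(self) -> usize;
}

#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
struct ItemLocalId(u32);

impl Idx for ItemLocalId {
    fn new(idx: usize) -> Self {
        debug_assert!(idx <= u32::MAX as usize);
        ItemLocalId(idx as u32)
    }
    fn index(self) -> usize {
        self.0 as usize
    }
}

// Dense map keyed by the index: a `Vec` instead of a tree or hash map.
struct IndexVec<I: Idx, T> {
    raw: Vec<T>,
    _marker: std::marker::PhantomData<I>,
}

impl<I: Idx, T> IndexVec<I, T> {
    fn new() -> Self {
        IndexVec { raw: Vec::new(), _marker: std::marker::PhantomData }
    }
    fn push(&mut self, value: T) -> I {
        self.raw.push(value);
        I::new(self.raw.len() - 1)
    }
    fn get(&self, index: I) -> Option<&T> {
        self.raw.get(index.index())
    }
}

fn main() {
    let mut v: IndexVec<ItemLocalId, &str> = IndexVec::new();
    let id = v.push("first");
    assert_eq!(v.get(id), Some(&"first"));
}
```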
-#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, - RustcEncodable, RustcDecodable)] -pub struct ItemLocalId(pub u32); - -impl ItemLocalId { - pub fn as_usize(&self) -> usize { - self.0 as usize +// hack to ensure that we don't try to access the private parts of `ItemLocalId` in this module +mod item_local_id_inner { + use rustc_data_structures::indexed_vec::Idx; + /// An `ItemLocalId` uniquely identifies something within a given "item-like", + /// that is within a hir::Item, hir::TraitItem, or hir::ImplItem. There is no + /// guarantee that the numerical value of a given `ItemLocalId` corresponds to + /// the node's position within the owning item in any way, but there is a + /// guarantee that the `LocalItemId`s within an owner occupy a dense range of + /// integers starting at zero, so a mapping that maps all or most nodes within + /// an "item-like" to something else can be implement by a `Vec` instead of a + /// tree or hash map. + newtype_index! { + pub struct ItemLocalId { .. } } } -impl indexed_vec::Idx for ItemLocalId { - fn new(idx: usize) -> Self { - debug_assert!((idx as u32) as usize == idx); - ItemLocalId(idx as u32) - } - - fn index(self) -> usize { - self.0 as usize - } -} +pub use self::item_local_id_inner::ItemLocalId; /// The `HirId` corresponding to CRATE_NODE_ID and CRATE_DEF_INDEX pub const CRATE_HIR_ID: HirId = HirId { owner: CRATE_DEF_INDEX, - local_id: ItemLocalId(0) + local_id: ItemLocalId::from_u32_const(0) }; pub const DUMMY_HIR_ID: HirId = HirId { @@ -162,7 +148,7 @@ pub const DUMMY_HIR_ID: HirId = HirId { local_id: DUMMY_ITEM_LOCAL_ID, }; -pub const DUMMY_ITEM_LOCAL_ID: ItemLocalId = ItemLocalId(!0); +pub const DUMMY_ITEM_LOCAL_ID: ItemLocalId = ItemLocalId::MAX; #[derive(Clone, RustcEncodable, RustcDecodable, Copy)] pub struct Label { @@ -331,7 +317,7 @@ impl Path { impl fmt::Debug for Path { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "path({})", print::to_string(print::NO_ANN, |s| s.print_path(self, false))) + write!(f, "path({})", self) } } @@ -506,9 +492,9 @@ pub enum TraitBoundModifier { } /// The AST represents all type param bounds as types. -/// typeck::collect::compute_bounds matches these against -/// the "special" built-in traits (see middle::lang_items) and -/// detects Copy, Send and Sync. +/// `typeck::collect::compute_bounds` matches these against +/// the "special" built-in traits (see `middle::lang_items`) and +/// detects `Copy`, `Send` and `Sync`. #[derive(Clone, RustcEncodable, RustcDecodable, Debug)] pub enum GenericBound { Trait(PolyTraitRef, TraitBoundModifier), @@ -698,14 +684,12 @@ pub struct WhereEqPredicate { pub rhs_ty: P, } -pub type CrateConfig = HirVec>; - /// The top-level data structure that stores the entire contents of /// the crate currently being compiled. /// /// For more details, see the [rustc guide]. 
/// -/// [rustc guide]: https://rust-lang-nursery.github.io/rustc-guide/hir.html +/// [rustc guide]: https://rust-lang.github.io/rustc-guide/hir.html #[derive(Clone, RustcEncodable, RustcDecodable, Debug)] pub struct Crate { pub module: Mod, @@ -1196,8 +1180,8 @@ impl StmtKind { pub fn id(&self) -> NodeId { match *self { - StmtKind::Decl(_, id) => id, - StmtKind::Expr(_, id) => id, + StmtKind::Decl(_, id) | + StmtKind::Expr(_, id) | StmtKind::Semi(_, id) => id, } } diff --git a/src/librustc/hir/print.rs b/src/librustc/hir/print.rs index e69d32ad1d..9a0ceddcf1 100644 --- a/src/librustc/hir/print.rs +++ b/src/librustc/hir/print.rs @@ -25,6 +25,7 @@ use hir; use hir::{PatKind, GenericBound, TraitBoundModifier, RangeEnd}; use hir::{GenericParam, GenericParamKind, GenericArg}; +use std::borrow::Cow; use std::cell::Cell; use std::io::{self, Write, Read}; use std::iter::Peekable; @@ -209,7 +210,7 @@ pub fn to_string(ann: &dyn PpAnn, f: F) -> String String::from_utf8(wr).unwrap() } -pub fn visibility_qualified(vis: &hir::Visibility, w: &str) -> String { +pub fn visibility_qualified>>(vis: &hir::Visibility, w: S) -> String { to_string(NO_ANN, |s| { s.print_visibility(vis)?; s.s.word(w) @@ -226,12 +227,13 @@ impl<'a> State<'a> { self.s.word(" ") } - pub fn word_nbsp(&mut self, w: &str) -> io::Result<()> { + pub fn word_nbsp>>(&mut self, w: S) -> io::Result<()> { self.s.word(w)?; self.nbsp() } - pub fn head(&mut self, w: &str) -> io::Result<()> { + pub fn head>>(&mut self, w: S) -> io::Result<()> { + let w = w.into(); // outer-box is consistent self.cbox(indent_unit)?; // head-box is inconsistent @@ -303,7 +305,7 @@ impl<'a> State<'a> { pub fn synth_comment(&mut self, text: String) -> io::Result<()> { self.s.word("/*")?; self.s.space()?; - self.s.word(&text[..])?; + self.s.word(text)?; self.s.space()?; self.s.word("*/") } @@ -468,7 +470,7 @@ impl<'a> State<'a> { self.end() // end the outer fn box } hir::ForeignItemKind::Static(ref t, m) => { - self.head(&visibility_qualified(&item.vis, "static"))?; + self.head(visibility_qualified(&item.vis, "static"))?; if m { self.word_space("mut")?; } @@ -480,7 +482,7 @@ impl<'a> State<'a> { self.end() // end the outer cbox } hir::ForeignItemKind::Type => { - self.head(&visibility_qualified(&item.vis, "type"))?; + self.head(visibility_qualified(&item.vis, "type"))?; self.print_name(item.name)?; self.s.word(";")?; self.end()?; // end the head-ibox @@ -495,7 +497,7 @@ impl<'a> State<'a> { default: Option, vis: &hir::Visibility) -> io::Result<()> { - self.s.word(&visibility_qualified(vis, ""))?; + self.s.word(visibility_qualified(vis, ""))?; self.word_space("const")?; self.print_ident(ident)?; self.word_space(":")?; @@ -534,7 +536,7 @@ impl<'a> State<'a> { self.ann.pre(self, AnnNode::Item(item))?; match item.node { hir::ItemKind::ExternCrate(orig_name) => { - self.head(&visibility_qualified(&item.vis, "extern crate"))?; + self.head(visibility_qualified(&item.vis, "extern crate"))?; if let Some(orig_name) = orig_name { self.print_name(orig_name)?; self.s.space()?; @@ -547,7 +549,7 @@ impl<'a> State<'a> { self.end()?; // end outer head-block } hir::ItemKind::Use(ref path, kind) => { - self.head(&visibility_qualified(&item.vis, "use"))?; + self.head(visibility_qualified(&item.vis, "use"))?; self.print_path(path, false)?; match kind { @@ -566,7 +568,7 @@ impl<'a> State<'a> { self.end()?; // end outer head-block } hir::ItemKind::Static(ref ty, m, expr) => { - self.head(&visibility_qualified(&item.vis, "static"))?; + self.head(visibility_qualified(&item.vis, "static"))?; 
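The pretty-printer methods in this hunk now accept anything convertible to `Cow<'static, str>`, so callers can hand over a `&'static str` without allocating or an owned `String` without an extra copy. A minimal sketch of that API shape with a made-up `Printer` type:

```rust
use std::borrow::Cow;

struct Printer {
    out: String,
}

impl Printer {
    // Accepts both `&'static str` and `String` through one signature.
    fn word<S: Into<Cow<'static, str>>>(&mut self, w: S) {
        self.out.push_str(&w.into());
    }
}

fn main() {
    let mut p = Printer { out: String::new() };
    p.word("extern");             // borrowed, no allocation
    p.word(" ");
    p.word(format!("{:?}", "C")); // owned `String`, moved in as-is
    assert_eq!(p.out, "extern \"C\"");
}
```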
if m == hir::MutMutable { self.word_space("mut")?; } @@ -582,7 +584,7 @@ impl<'a> State<'a> { self.end()?; // end the outer cbox } hir::ItemKind::Const(ref ty, expr) => { - self.head(&visibility_qualified(&item.vis, "const"))?; + self.head(visibility_qualified(&item.vis, "const"))?; self.print_name(item.name)?; self.word_space(":")?; self.print_type(&ty)?; @@ -609,7 +611,7 @@ impl<'a> State<'a> { self.ann.nested(self, Nested::Body(body))?; } hir::ItemKind::Mod(ref _mod) => { - self.head(&visibility_qualified(&item.vis, "mod"))?; + self.head(visibility_qualified(&item.vis, "mod"))?; self.print_name(item.name)?; self.nbsp()?; self.bopen()?; @@ -618,18 +620,18 @@ impl<'a> State<'a> { } hir::ItemKind::ForeignMod(ref nmod) => { self.head("extern")?; - self.word_nbsp(&nmod.abi.to_string())?; + self.word_nbsp(nmod.abi.to_string())?; self.bopen()?; self.print_foreign_mod(nmod, &item.attrs)?; self.bclose(item.span)?; } hir::ItemKind::GlobalAsm(ref ga) => { - self.head(&visibility_qualified(&item.vis, "global asm"))?; - self.s.word(&ga.asm.as_str())?; + self.head(visibility_qualified(&item.vis, "global asm"))?; + self.s.word(ga.asm.as_str().get())?; self.end()? } hir::ItemKind::Ty(ref ty, ref generics) => { - self.head(&visibility_qualified(&item.vis, "type"))?; + self.head(visibility_qualified(&item.vis, "type"))?; self.print_name(item.name)?; self.print_generic_params(&generics.params)?; self.end()?; // end the inner ibox @@ -642,7 +644,7 @@ impl<'a> State<'a> { self.end()?; // end the outer ibox } hir::ItemKind::Existential(ref exist) => { - self.head(&visibility_qualified(&item.vis, "existential type"))?; + self.head(visibility_qualified(&item.vis, "existential type"))?; self.print_name(item.name)?; self.print_generic_params(&exist.generics.params)?; self.end()?; // end the inner ibox @@ -668,11 +670,11 @@ impl<'a> State<'a> { self.print_enum_def(enum_definition, params, item.name, item.span, &item.vis)?; } hir::ItemKind::Struct(ref struct_def, ref generics) => { - self.head(&visibility_qualified(&item.vis, "struct"))?; + self.head(visibility_qualified(&item.vis, "struct"))?; self.print_struct(struct_def, generics, item.name, item.span, true)?; } hir::ItemKind::Union(ref struct_def, ref generics) => { - self.head(&visibility_qualified(&item.vis, "union"))?; + self.head(visibility_qualified(&item.vis, "union"))?; self.print_struct(struct_def, generics, item.name, item.span, true)?; } hir::ItemKind::Impl(unsafety, @@ -795,7 +797,7 @@ impl<'a> State<'a> { span: syntax_pos::Span, visibility: &hir::Visibility) -> io::Result<()> { - self.head(&visibility_qualified(visibility, "enum"))?; + self.head(visibility_qualified(visibility, "enum"))?; self.print_name(name)?; self.print_generic_params(&generics.params)?; self.print_where_clause(&generics.where_clause)?; @@ -1587,14 +1589,14 @@ impl<'a> State<'a> { } pub fn print_usize(&mut self, i: usize) -> io::Result<()> { - self.s.word(&i.to_string()) + self.s.word(i.to_string()) } pub fn print_ident(&mut self, ident: ast::Ident) -> io::Result<()> { if ident.is_raw_guess() { - self.s.word(&format!("r#{}", ident.name))?; + self.s.word(format!("r#{}", ident.name))?; } else { - self.s.word(&ident.as_str())?; + self.s.word(ident.as_str().get())?; } self.ann.post(self, AnnNode::Name(&ident.name)) } @@ -2010,7 +2012,7 @@ impl<'a> State<'a> { self.commasep(Inconsistent, &decl.inputs, |s, ty| { s.ibox(indent_unit)?; if let Some(arg_name) = arg_names.get(i) { - s.s.word(&arg_name.as_str())?; + s.s.word(arg_name.as_str().get())?; s.s.word(":")?; s.s.space()?; } else if 
let Some(body_id) = body_id { @@ -2073,7 +2075,8 @@ impl<'a> State<'a> { } } - pub fn print_bounds(&mut self, prefix: &str, bounds: &[hir::GenericBound]) -> io::Result<()> { + pub fn print_bounds(&mut self, prefix: &'static str, bounds: &[hir::GenericBound]) + -> io::Result<()> { if !bounds.is_empty() { self.s.word(prefix)?; let mut first = true; @@ -2322,7 +2325,7 @@ impl<'a> State<'a> { Some(Abi::Rust) => Ok(()), Some(abi) => { self.word_nbsp("extern")?; - self.word_nbsp(&abi.to_string()) + self.word_nbsp(abi.to_string()) } None => Ok(()), } @@ -2332,7 +2335,7 @@ impl<'a> State<'a> { match opt_abi { Some(abi) => { self.word_nbsp("extern")?; - self.word_nbsp(&abi.to_string()) + self.word_nbsp(abi.to_string()) } None => Ok(()), } @@ -2342,7 +2345,7 @@ impl<'a> State<'a> { header: hir::FnHeader, vis: &hir::Visibility) -> io::Result<()> { - self.s.word(&visibility_qualified(vis, ""))?; + self.s.word(visibility_qualified(vis, ""))?; match header.constness { hir::Constness::NotConst => {} @@ -2358,7 +2361,7 @@ impl<'a> State<'a> { if header.abi != Abi::Rust { self.word_nbsp("extern")?; - self.word_nbsp(&header.abi.to_string())?; + self.word_nbsp(header.abi.to_string())?; } self.s.word("fn") diff --git a/src/librustc/ich/caching_codemap_view.rs b/src/librustc/ich/caching_source_map_view.rs similarity index 100% rename from src/librustc/ich/caching_codemap_view.rs rename to src/librustc/ich/caching_source_map_view.rs diff --git a/src/librustc/ich/hcx.rs b/src/librustc/ich/hcx.rs index 1c7c1b854d..7c623a1874 100644 --- a/src/librustc/ich/hcx.rs +++ b/src/librustc/ich/hcx.rs @@ -369,8 +369,7 @@ impl<'a> HashStable> for Span { // times, we cache a stable hash of it and hash that instead of // recursing every time. thread_local! { - static CACHE: RefCell> = - RefCell::new(Default::default()); + static CACHE: RefCell> = Default::default(); } let sub_hash: u64 = CACHE.with(|cache| { diff --git a/src/librustc/ich/impls_hir.rs b/src/librustc/ich/impls_hir.rs index b220634d0d..ae0d78d295 100644 --- a/src/librustc/ich/impls_hir.rs +++ b/src/librustc/ich/impls_hir.rs @@ -79,7 +79,14 @@ impl<'a> ToStableHashKey> for CrateNum { } } -impl_stable_hash_for!(tuple_struct hir::ItemLocalId { index }); +impl<'a> HashStable> for hir::ItemLocalId { + #[inline] + fn hash_stable(&self, + hcx: &mut StableHashingContext<'a>, + hasher: &mut StableHasher) { + self.as_u32().hash_stable(hcx, hasher); + } +} impl<'a> ToStableHashKey> for hir::ItemLocalId { @@ -800,7 +807,7 @@ impl<'a> HashStable> for hir::Mod { .iter() .map(|id| { let (def_path_hash, local_id) = id.id.to_stable_hash_key(hcx); - debug_assert_eq!(local_id, hir::ItemLocalId(0)); + debug_assert_eq!(local_id, hir::ItemLocalId::from_u32(0)); def_path_hash.0 }).fold(Fingerprint::ZERO, |a, b| { a.combine_commutative(b) diff --git a/src/librustc/ich/impls_mir.rs b/src/librustc/ich/impls_mir.rs index 274a2df283..d98bb82aab 100644 --- a/src/librustc/ich/impls_mir.rs +++ b/src/librustc/ich/impls_mir.rs @@ -37,68 +37,31 @@ impl_stable_hash_for!(struct mir::BasicBlockData<'tcx> { statements, terminator, impl_stable_hash_for!(struct mir::UnsafetyViolation { source_info, description, details, kind }); impl_stable_hash_for!(struct mir::UnsafetyCheckResult { violations, unsafe_blocks }); -impl<'a> HashStable> -for mir::BorrowKind { - #[inline] - fn hash_stable(&self, - hcx: &mut StableHashingContext<'a>, - hasher: &mut StableHasher) { - mem::discriminant(self).hash_stable(hcx, hasher); +impl_stable_hash_for!(enum mir::BorrowKind { + Shared, + Shallow, + Unique, + Mut { 
allow_two_phase_borrow }, +}); - match *self { - mir::BorrowKind::Shared | - mir::BorrowKind::Shallow | - mir::BorrowKind::Unique => {} - mir::BorrowKind::Mut { allow_two_phase_borrow } => { - allow_two_phase_borrow.hash_stable(hcx, hasher); - } - } - } -} - - -impl<'a> HashStable> -for mir::UnsafetyViolationKind { - #[inline] - fn hash_stable(&self, - hcx: &mut StableHashingContext<'a>, - hasher: &mut StableHasher) { - - mem::discriminant(self).hash_stable(hcx, hasher); - - match *self { - mir::UnsafetyViolationKind::General => {} - mir::UnsafetyViolationKind::MinConstFn => {} - mir::UnsafetyViolationKind::ExternStatic(lint_node_id) | - mir::UnsafetyViolationKind::BorrowPacked(lint_node_id) => { - lint_node_id.hash_stable(hcx, hasher); - } - - } - } -} +impl_stable_hash_for!(enum mir::UnsafetyViolationKind { + General, + MinConstFn, + ExternStatic(lint_node_id), + BorrowPacked(lint_node_id), +}); impl_stable_hash_for!(struct mir::Terminator<'tcx> { kind, source_info }); -impl<'a, 'gcx, T> HashStable> for mir::ClearCrossCrate - where T: HashStable> -{ - #[inline] - fn hash_stable(&self, - hcx: &mut StableHashingContext<'a>, - hasher: &mut StableHasher) { - mem::discriminant(self).hash_stable(hcx, hasher); - match *self { - mir::ClearCrossCrate::Clear => {} - mir::ClearCrossCrate::Set(ref value) => { - value.hash_stable(hcx, hasher); - } - } +impl_stable_hash_for!( + impl for enum mir::ClearCrossCrate [ mir::ClearCrossCrate ] { + Clear, + Set(value), } -} +); impl<'a> HashStable> for mir::Local { #[inline] @@ -254,12 +217,12 @@ for mir::StatementKind<'gcx> { mir::StatementKind::StorageDead(ref place) => { place.hash_stable(hcx, hasher); } - mir::StatementKind::EndRegion(ref region_scope) => { - region_scope.hash_stable(hcx, hasher); + mir::StatementKind::EscapeToRaw(ref place) => { + place.hash_stable(hcx, hasher); } - mir::StatementKind::Validate(ref op, ref places) => { - op.hash_stable(hcx, hasher); - places.hash_stable(hcx, hasher); + mir::StatementKind::Retag { fn_entry, ref place } => { + fn_entry.hash_stable(hcx, hasher); + place.hash_stable(hcx, hasher); } mir::StatementKind::AscribeUserType(ref place, ref variance, ref c_ty) => { place.hash_stable(hcx, hasher); @@ -278,23 +241,6 @@ for mir::StatementKind<'gcx> { impl_stable_hash_for!(enum mir::FakeReadCause { ForMatchGuard, ForMatchedPlace, ForLet }); -impl<'a, 'gcx, T> HashStable> - for mir::ValidationOperand<'gcx, T> - where T: HashStable> -{ - fn hash_stable(&self, - hcx: &mut StableHashingContext<'a>, - hasher: &mut StableHasher) - { - self.place.hash_stable(hcx, hasher); - self.ty.hash_stable(hcx, hasher); - self.re.hash_stable(hcx, hasher); - self.mutbl.hash_stable(hcx, hasher); - } -} - -impl_stable_hash_for!(enum mir::ValidationOp { Acquire, Release, Suspend(region_scope) }); - impl<'a, 'gcx> HashStable> for mir::Place<'gcx> { fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, diff --git a/src/librustc/ich/impls_syntax.rs b/src/librustc/ich/impls_syntax.rs index 4be467c01a..b629fb820b 100644 --- a/src/librustc/ich/impls_syntax.rs +++ b/src/librustc/ich/impls_syntax.rs @@ -314,7 +314,6 @@ fn hash_token<'a, 'gcx, W: StableHasherResult>( token::Token::DotDot | token::Token::DotDotDot | token::Token::DotDotEq | - token::Token::DotEq | token::Token::Comma | token::Token::Semi | token::Token::Colon | diff --git a/src/librustc/ich/impls_ty.rs b/src/librustc/ich/impls_ty.rs index 642eb11006..e6a7c20f79 100644 --- a/src/librustc/ich/impls_ty.rs +++ b/src/librustc/ich/impls_ty.rs @@ -100,9 +100,6 @@ for ty::RegionKind { 
ty::ReEmpty => { // No variant fields to hash for these ... } - ty::ReCanonical(c) => { - c.hash_stable(hcx, hasher); - } ty::ReLateBound(db, ty::BrAnon(i)) => { db.hash_stable(hcx, hasher); i.hash_stable(hcx, hasher); @@ -147,7 +144,7 @@ impl<'a> HashStable> for ty::RegionVid { } } -impl<'gcx> HashStable> for ty::BoundTyIndex { +impl<'gcx> HashStable> for ty::BoundVar { #[inline] fn hash_stable(&self, hcx: &mut StableHashingContext<'gcx>, @@ -219,7 +216,9 @@ impl<'gcx> HashStable> for ty::adjustment::AutoBorrow } } -impl_stable_hash_for!(struct ty::UpvarId { var_id, closure_expr_id }); +impl_stable_hash_for!(struct ty::UpvarPath { hir_id }); + +impl_stable_hash_for!(struct ty::UpvarId { var_path, closure_expr_id }); impl_stable_hash_for!(enum ty::BorrowKind { ImmBorrow, @@ -227,20 +226,10 @@ impl_stable_hash_for!(enum ty::BorrowKind { MutBorrow }); -impl<'a, 'gcx> HashStable> -for ty::UpvarCapture<'gcx> { - fn hash_stable(&self, - hcx: &mut StableHashingContext<'a>, - hasher: &mut StableHasher) { - mem::discriminant(self).hash_stable(hcx, hasher); - match *self { - ty::UpvarCapture::ByValue => {} - ty::UpvarCapture::ByRef(ref up_var_borrow) => { - up_var_borrow.hash_stable(hcx, hasher); - } - } - } -} +impl_stable_hash_for!(impl<'gcx> for enum ty::UpvarCapture<'gcx> [ ty::UpvarCapture ] { + ByValue, + ByRef(up_var_borrow), +}); impl_stable_hash_for!(struct ty::GenSig<'tcx> { yield_ty, @@ -275,64 +264,23 @@ impl_stable_hash_for!(enum ty::Visibility { impl_stable_hash_for!(struct ty::TraitRef<'tcx> { def_id, substs }); impl_stable_hash_for!(struct ty::TraitPredicate<'tcx> { trait_ref }); impl_stable_hash_for!(struct ty::SubtypePredicate<'tcx> { a_is_expected, a, b }); - -impl<'a, 'gcx, A, B> HashStable> -for ty::OutlivesPredicate - where A: HashStable>, - B: HashStable>, -{ - fn hash_stable(&self, - hcx: &mut StableHashingContext<'a>, - hasher: &mut StableHasher) { - let ty::OutlivesPredicate(ref a, ref b) = *self; - a.hash_stable(hcx, hasher); - b.hash_stable(hcx, hasher); - } -} - +impl_stable_hash_for!(impl for tuple_struct ty::OutlivesPredicate { a, b }); impl_stable_hash_for!(struct ty::ProjectionPredicate<'tcx> { projection_ty, ty }); impl_stable_hash_for!(struct ty::ProjectionTy<'tcx> { substs, item_def_id }); - -impl<'a, 'gcx> HashStable> for ty::Predicate<'gcx> { - fn hash_stable(&self, - hcx: &mut StableHashingContext<'a>, - hasher: &mut StableHasher) { - mem::discriminant(self).hash_stable(hcx, hasher); - match *self { - ty::Predicate::Trait(ref pred) => { - pred.hash_stable(hcx, hasher); - } - ty::Predicate::Subtype(ref pred) => { - pred.hash_stable(hcx, hasher); - } - ty::Predicate::RegionOutlives(ref pred) => { - pred.hash_stable(hcx, hasher); - } - ty::Predicate::TypeOutlives(ref pred) => { - pred.hash_stable(hcx, hasher); - } - ty::Predicate::Projection(ref pred) => { - pred.hash_stable(hcx, hasher); - } - ty::Predicate::WellFormed(ty) => { - ty.hash_stable(hcx, hasher); - } - ty::Predicate::ObjectSafe(def_id) => { - def_id.hash_stable(hcx, hasher); - } - ty::Predicate::ClosureKind(def_id, closure_substs, closure_kind) => { - def_id.hash_stable(hcx, hasher); - closure_substs.hash_stable(hcx, hasher); - closure_kind.hash_stable(hcx, hasher); - } - ty::Predicate::ConstEvaluatable(def_id, substs) => { - def_id.hash_stable(hcx, hasher); - substs.hash_stable(hcx, hasher); - } - } +impl_stable_hash_for!( + impl<'tcx> for enum ty::Predicate<'tcx> [ ty::Predicate ] { + Trait(pred), + Subtype(pred), + RegionOutlives(pred), + TypeOutlives(pred), + Projection(pred), + WellFormed(ty), + 
ObjectSafe(def_id), + ClosureKind(def_id, closure_substs, closure_kind), + ConstEvaluatable(def_id, substs), } -} +); impl<'a> HashStable> for ty::AdtFlags { fn hash_stable(&self, @@ -361,70 +309,43 @@ impl_stable_hash_for!(struct ty::FieldDef { vis, }); -impl<'a, 'gcx> HashStable> -for ::mir::interpret::ConstValue<'gcx> { - fn hash_stable(&self, - hcx: &mut StableHashingContext<'a>, - hasher: &mut StableHasher) { - use mir::interpret::ConstValue::*; +impl_stable_hash_for!( + impl<'tcx> for enum mir::interpret::ConstValue<'tcx> [ mir::interpret::ConstValue ] { + Unevaluated(def_id, substs), + Scalar(val), + ScalarPair(a, b), + ByRef(id, alloc, offset), + } +); +impl_stable_hash_for!(struct ::mir::interpret::RawConst<'tcx> { + alloc_id, + ty, +}); - mem::discriminant(self).hash_stable(hcx, hasher); - - match *self { - Unevaluated(def_id, substs) => { - def_id.hash_stable(hcx, hasher); - substs.hash_stable(hcx, hasher); - } - Scalar(val) => { - val.hash_stable(hcx, hasher); - } - ScalarPair(a, b) => { - a.hash_stable(hcx, hasher); - b.hash_stable(hcx, hasher); - } - ByRef(id, alloc, offset) => { - id.hash_stable(hcx, hasher); - alloc.hash_stable(hcx, hasher); - offset.hash_stable(hcx, hasher); - } - } +impl_stable_hash_for! { + impl for struct mir::interpret::Pointer { + alloc_id, + offset, + tag, } } -impl<'a, Tag> HashStable> -for ::mir::interpret::Pointer -where Tag: HashStable> -{ - fn hash_stable(&self, - hcx: &mut StableHashingContext<'a>, - hasher: &mut StableHasher) { - let ::mir::interpret::Pointer { alloc_id, offset, tag } = self; - alloc_id.hash_stable(hcx, hasher); - offset.hash_stable(hcx, hasher); - tag.hash_stable(hcx, hasher); +impl_stable_hash_for!( + impl for enum mir::interpret::Scalar [ mir::interpret::Scalar ] { + Bits { bits, size }, + Ptr(ptr), } -} +); -impl<'a, Tag> HashStable> -for ::mir::interpret::Scalar -where Tag: HashStable> -{ - fn hash_stable(&self, - hcx: &mut StableHashingContext<'a>, - hasher: &mut StableHasher) { - use mir::interpret::Scalar::*; - - mem::discriminant(self).hash_stable(hcx, hasher); - match self { - Bits { bits, size } => { - bits.hash_stable(hcx, hasher); - size.hash_stable(hcx, hasher); - }, - Ptr(ptr) => ptr.hash_stable(hcx, hasher), - } +impl_stable_hash_for!( + impl<'tcx, M> for enum mir::interpret::AllocType<'tcx, M> [ mir::interpret::AllocType ] { + Function(instance), + Static(def_id), + Memory(mem), } -} +); +// AllocIds get resolved to whatever they point to (to be stable) impl<'a> HashStable> for mir::interpret::AllocId { fn hash_stable( &self, @@ -440,23 +361,7 @@ impl<'a> HashStable> for mir::interpret::AllocId { } } -impl<'a, 'gcx, M: HashStable>> HashStable> -for mir::interpret::AllocType<'gcx, M> { - fn hash_stable(&self, - hcx: &mut StableHashingContext<'a>, - hasher: &mut StableHasher) { - use mir::interpret::AllocType::*; - - mem::discriminant(self).hash_stable(hcx, hasher); - - match *self { - Function(instance) => instance.hash_stable(hcx, hasher), - Static(def_id) => def_id.hash_stable(hcx, hasher), - Memory(ref mem) => mem.hash_stable(hcx, hasher), - } - } -} - +// Allocations treat their relocations specially impl<'a> HashStable> for mir::interpret::Allocation { fn hash_stable( &self, @@ -483,16 +388,15 @@ impl_stable_hash_for!(struct ty::Const<'tcx> { val }); -impl_stable_hash_for!(struct ::mir::interpret::ConstEvalErr<'tcx> { - span, - stacktrace, - error +impl_stable_hash_for!(enum mir::interpret::ErrorHandled { + Reported, + TooGeneric }); -impl_stable_hash_for!(struct ::mir::interpret::FrameInfo { - span, 
+impl_stable_hash_for!(struct mir::interpret::FrameInfo<'tcx> { + call_site, lint_root, - location + instance }); impl_stable_hash_for!(struct ty::ClosureSubsts<'tcx> { substs }); @@ -503,169 +407,81 @@ impl_stable_hash_for!(struct ty::GenericPredicates<'tcx> { predicates }); -impl_stable_hash_for!(struct ::mir::interpret::EvalError<'tcx> { kind }); - -impl<'a, 'gcx, O: HashStable>> HashStable> -for ::mir::interpret::EvalErrorKind<'gcx, O> { - fn hash_stable(&self, - hcx: &mut StableHashingContext<'a>, - hasher: &mut StableHasher) { - use mir::interpret::EvalErrorKind::*; - - mem::discriminant(&self).hash_stable(hcx, hasher); - - match *self { - FunctionArgCountMismatch | - DanglingPointerDeref | - DoubleFree | - InvalidMemoryAccess | - InvalidFunctionPointer | - InvalidBool | - InvalidNullPointerUsage | - ReadPointerAsBytes | - ReadBytesAsPointer | - ReadForeignStatic | - InvalidPointerMath | - DeadLocal | - StackFrameLimitReached | - OutOfTls | - TlsOutOfBounds | - CalledClosureAsFunction | - VtableForArgumentlessMethod | - ModifiedConstantMemory | - AssumptionNotHeld | - InlineAsm | - ReallocateNonBasePtr | - DeallocateNonBasePtr | - HeapAllocZeroBytes | - Unreachable | - ReadFromReturnPointer | - UnimplementedTraitSelection | - TypeckError | - TooGeneric | - CheckMatchError | - DerefFunctionPointer | - ExecuteMemory | - OverflowNeg | - RemainderByZero | - DivisionByZero | - GeneratorResumedAfterReturn | - GeneratorResumedAfterPanic | - InfiniteLoop => {} - ReadUndefBytes(offset) => offset.hash_stable(hcx, hasher), - InvalidDiscriminant(val) => val.hash_stable(hcx, hasher), - Panic { ref msg, ref file, line, col } => { - msg.hash_stable(hcx, hasher); - file.hash_stable(hcx, hasher); - line.hash_stable(hcx, hasher); - col.hash_stable(hcx, hasher); - }, - ReferencedConstant(ref err) => err.hash_stable(hcx, hasher), - MachineError(ref err) => err.hash_stable(hcx, hasher), - FunctionAbiMismatch(a, b) => { - a.hash_stable(hcx, hasher); - b.hash_stable(hcx, hasher) - }, - FunctionArgMismatch(a, b) => { - a.hash_stable(hcx, hasher); - b.hash_stable(hcx, hasher) - }, - FunctionRetMismatch(a, b) => { - a.hash_stable(hcx, hasher); - b.hash_stable(hcx, hasher) - }, - NoMirFor(ref s) => s.hash_stable(hcx, hasher), - UnterminatedCString(ptr) => ptr.hash_stable(hcx, hasher), - PointerOutOfBounds { - ptr, - access, - allocation_size, - } => { - ptr.hash_stable(hcx, hasher); - access.hash_stable(hcx, hasher); - allocation_size.hash_stable(hcx, hasher) - }, - InvalidBoolOp(bop) => bop.hash_stable(hcx, hasher), - Unimplemented(ref s) => s.hash_stable(hcx, hasher), - BoundsCheck { ref len, ref index } => { - len.hash_stable(hcx, hasher); - index.hash_stable(hcx, hasher) - }, - Intrinsic(ref s) => s.hash_stable(hcx, hasher), - InvalidChar(c) => c.hash_stable(hcx, hasher), - AbiViolation(ref s) => s.hash_stable(hcx, hasher), - AlignmentCheckFailed { - required, - has, - } => { - required.hash_stable(hcx, hasher); - has.hash_stable(hcx, hasher) - }, - MemoryLockViolation { - ptr, - len, - frame, - access, - ref lock, - } => { - ptr.hash_stable(hcx, hasher); - len.hash_stable(hcx, hasher); - frame.hash_stable(hcx, hasher); - access.hash_stable(hcx, hasher); - lock.hash_stable(hcx, hasher) - }, - MemoryAcquireConflict { - ptr, - len, - kind, - ref lock, - } => { - ptr.hash_stable(hcx, hasher); - len.hash_stable(hcx, hasher); - kind.hash_stable(hcx, hasher); - lock.hash_stable(hcx, hasher) - }, - InvalidMemoryLockRelease { - ptr, - len, - frame, - ref lock, - } => { - ptr.hash_stable(hcx, hasher); - 
len.hash_stable(hcx, hasher); - frame.hash_stable(hcx, hasher); - lock.hash_stable(hcx, hasher) - }, - DeallocatedLockedMemory { - ptr, - ref lock, - } => { - ptr.hash_stable(hcx, hasher); - lock.hash_stable(hcx, hasher) - }, - ValidationFailure(ref s) => s.hash_stable(hcx, hasher), - TypeNotPrimitive(ty) => ty.hash_stable(hcx, hasher), - ReallocatedWrongMemoryKind(ref a, ref b) => { - a.hash_stable(hcx, hasher); - b.hash_stable(hcx, hasher) - }, - DeallocatedWrongMemoryKind(ref a, ref b) => { - a.hash_stable(hcx, hasher); - b.hash_stable(hcx, hasher) - }, - IncorrectAllocationInformation(a, b, c, d) => { - a.hash_stable(hcx, hasher); - b.hash_stable(hcx, hasher); - c.hash_stable(hcx, hasher); - d.hash_stable(hcx, hasher) - }, - Layout(lay) => lay.hash_stable(hcx, hasher), - HeapAllocNonPowerOfTwoAlignment(n) => n.hash_stable(hcx, hasher), - PathNotFound(ref v) => v.hash_stable(hcx, hasher), - Overflow(op) => op.hash_stable(hcx, hasher), - } +impl_stable_hash_for!( + impl<'tcx, O> for enum mir::interpret::EvalErrorKind<'tcx, O> + [ mir::interpret::EvalErrorKind ] + { + FunctionArgCountMismatch, + DanglingPointerDeref, + DoubleFree, + InvalidMemoryAccess, + InvalidFunctionPointer, + InvalidBool, + InvalidNullPointerUsage, + ReadPointerAsBytes, + ReadBytesAsPointer, + ReadForeignStatic, + InvalidPointerMath, + DeadLocal, + StackFrameLimitReached, + OutOfTls, + TlsOutOfBounds, + CalledClosureAsFunction, + VtableForArgumentlessMethod, + ModifiedConstantMemory, + ModifiedStatic, + AssumptionNotHeld, + InlineAsm, + ReallocateNonBasePtr, + DeallocateNonBasePtr, + HeapAllocZeroBytes, + Unreachable, + ReadFromReturnPointer, + UnimplementedTraitSelection, + TypeckError, + TooGeneric, + DerefFunctionPointer, + ExecuteMemory, + OverflowNeg, + RemainderByZero, + DivisionByZero, + GeneratorResumedAfterReturn, + GeneratorResumedAfterPanic, + ReferencedConstant, + InfiniteLoop, + ReadUndefBytes(offset), + InvalidDiscriminant(val), + Panic { msg, file, line, col }, + MachineError(err), + FunctionAbiMismatch(a, b), + FunctionArgMismatch(a, b), + FunctionRetMismatch(a, b), + NoMirFor(s), + UnterminatedCString(ptr), + PointerOutOfBounds { ptr, check, allocation_size }, + InvalidBoolOp(bop), + Unimplemented(s), + BoundsCheck { len, index }, + Intrinsic(s), + InvalidChar(c), + AbiViolation(s), + AlignmentCheckFailed { required, has }, + ValidationFailure(s), + TypeNotPrimitive(ty), + ReallocatedWrongMemoryKind(a, b), + DeallocatedWrongMemoryKind(a, b), + IncorrectAllocationInformation(a, b, c, d), + Layout(lay), + HeapAllocNonPowerOfTwoAlignment(n), + PathNotFound(v), + Overflow(op), } -} +); + +impl_stable_hash_for!(enum mir::interpret::InboundsCheck { + Live, + MaybeDead +}); impl_stable_hash_for!(enum mir::interpret::Lock { NoLock, @@ -712,47 +528,18 @@ impl_stable_hash_for!(struct ty::GenericParamDef { kind }); -impl<'a> HashStable> for ty::GenericParamDefKind { - fn hash_stable(&self, - hcx: &mut StableHashingContext<'a>, - hasher: &mut StableHasher) { - mem::discriminant(self).hash_stable(hcx, hasher); - match *self { - ty::GenericParamDefKind::Lifetime => {} - ty::GenericParamDefKind::Type { - has_default, - ref object_lifetime_default, - ref synthetic, - } => { - has_default.hash_stable(hcx, hasher); - object_lifetime_default.hash_stable(hcx, hasher); - synthetic.hash_stable(hcx, hasher); - } - } - } -} +impl_stable_hash_for!(enum ty::GenericParamDefKind { + Lifetime, + Type { has_default, object_lifetime_default, synthetic }, +}); -impl<'a, 'gcx, T> HashStable> -for ::middle::resolve_lifetime::Set1 - 
where T: HashStable> -{ - fn hash_stable(&self, - hcx: &mut StableHashingContext<'a>, - hasher: &mut StableHasher) { - use middle::resolve_lifetime::Set1; - - mem::discriminant(self).hash_stable(hcx, hasher); - match *self { - Set1::Empty | - Set1::Many => { - // Nothing to do. - } - Set1::One(ref value) => { - value.hash_stable(hcx, hasher); - } - } +impl_stable_hash_for!( + impl for enum ::middle::resolve_lifetime::Set1 [ ::middle::resolve_lifetime::Set1 ] { + Empty, + Many, + One(value), } -} +); impl_stable_hash_for!(enum ::middle::resolve_lifetime::LifetimeDefOrigin { ExplicitOrElided, @@ -898,6 +685,13 @@ for ty::TyKind<'gcx> Param(param_ty) => { param_ty.hash_stable(hcx, hasher); } + Bound(debruijn, bound_ty) => { + debruijn.hash_stable(hcx, hasher); + bound_ty.hash_stable(hcx, hasher); + } + ty::Placeholder(placeholder_ty) => { + placeholder_ty.hash_stable(hcx, hasher); + } Foreign(def_id) => { def_id.hash_stable(hcx, hasher); } @@ -915,7 +709,6 @@ impl_stable_hash_for!(enum ty::InferTy { FreshTy(a), FreshIntTy(a), FreshFloatTy(a), - BoundTy(a), }); impl<'a, 'gcx> HashStable> @@ -1011,6 +804,9 @@ impl<'a, 'gcx> HashStable> for ty::InstanceDef<'gcx> { ty::InstanceDef::Item(def_id) => { def_id.hash_stable(hcx, hasher); } + ty::InstanceDef::VtableShim(def_id) => { + def_id.hash_stable(hcx, hasher); + } ty::InstanceDef::Intrinsic(def_id) => { def_id.hash_stable(hcx, hasher); } @@ -1162,6 +958,7 @@ for traits::Vtable<'gcx, N> where N: HashStable> { &VtableClosure(ref table_closure) => table_closure.hash_stable(hcx, hasher), &VtableFnPointer(ref table_fn_pointer) => table_fn_pointer.hash_stable(hcx, hasher), &VtableGenerator(ref table_generator) => table_generator.hash_stable(hcx, hasher), + &VtableTraitAlias(ref table_alias) => table_alias.hash_stable(hcx, hasher), } } } @@ -1270,14 +1067,30 @@ for traits::VtableGeneratorData<'gcx, N> where N: HashStable HashStable> +for traits::VtableTraitAliasData<'gcx, N> where N: HashStable> { + fn hash_stable(&self, + hcx: &mut StableHashingContext<'a>, + hasher: &mut StableHasher) { + let traits::VtableTraitAliasData { + alias_def_id, + substs, + ref nested, + } = *self; + alias_def_id.hash_stable(hcx, hasher); + substs.hash_stable(hcx, hasher); + nested.hash_stable(hcx, hasher); + } +} + impl_stable_hash_for!( impl<'tcx, V> for struct infer::canonical::Canonical<'tcx, V> { - variables, value + max_universe, variables, value } ); impl_stable_hash_for!( - impl<'tcx> for struct infer::canonical::CanonicalVarValues<'tcx> { + struct infer::canonical::CanonicalVarValues<'tcx> { var_values } ); @@ -1288,11 +1101,13 @@ impl_stable_hash_for!(struct infer::canonical::CanonicalVarInfo { impl_stable_hash_for!(enum infer::canonical::CanonicalVarKind { Ty(k), - Region + PlaceholderTy(placeholder), + Region(ui), + PlaceholderRegion(placeholder), }); impl_stable_hash_for!(enum infer::canonical::CanonicalTyVarKind { - General, + General(ui), Int, Float }); @@ -1395,7 +1210,7 @@ impl<'a, 'tcx> HashStable> for traits::Goal<'tcx> { } impl_stable_hash_for!( - impl<'tcx> for struct traits::ProgramClause<'tcx> { + struct traits::ProgramClause<'tcx> { goal, hypotheses, category } ); @@ -1430,7 +1245,7 @@ impl_stable_hash_for!(struct ty::subst::UserSubsts<'tcx> { substs, user_self_ty impl_stable_hash_for!(struct ty::subst::UserSelfTy<'tcx> { impl_def_id, self_ty }); impl_stable_hash_for!( - impl<'tcx> for struct traits::Environment<'tcx> { + struct traits::Environment<'tcx> { clauses, } ); diff --git a/src/librustc/ich/mod.rs b/src/librustc/ich/mod.rs index 
a23bab6226..9751c560ac 100644 --- a/src/librustc/ich/mod.rs +++ b/src/librustc/ich/mod.rs @@ -11,10 +11,10 @@ //! ICH - Incremental Compilation Hash crate use rustc_data_structures::fingerprint::Fingerprint; -pub use self::caching_codemap_view::CachingSourceMapView; +pub use self::caching_source_map_view::CachingSourceMapView; pub use self::hcx::{StableHashingContextProvider, StableHashingContext, NodeIdHashingMode, hash_stable_trait_impls}; -mod caching_codemap_view; +mod caching_source_map_view; mod hcx; mod impls_cstore; diff --git a/src/librustc/infer/at.rs b/src/librustc/infer/at.rs index 0e4c94aaaf..70e922c667 100644 --- a/src/librustc/infer/at.rs +++ b/src/librustc/infer/at.rs @@ -52,6 +52,7 @@ pub struct Trace<'a, 'gcx: 'tcx, 'tcx: 'a> { } impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { + #[inline] pub fn at(&'a self, cause: &'a ObligationCause<'tcx>, param_env: ty::ParamEnv<'tcx>) diff --git a/src/librustc/infer/canonical/canonicalizer.rs b/src/librustc/infer/canonical/canonicalizer.rs index 2b085a3407..54bfe90099 100644 --- a/src/librustc/infer/canonical/canonicalizer.rs +++ b/src/librustc/infer/canonical/canonicalizer.rs @@ -10,10 +10,10 @@ //! This module contains the "canonicalizer" itself. //! -//! For an overview of what canonicaliation is and how it fits into +//! For an overview of what canonicalization is and how it fits into //! rustc, check out the [chapter in the rustc guide][c]. //! -//! [c]: https://rust-lang-nursery.github.io/rustc-guide/traits/canonicalization.html +//! [c]: https://rust-lang.github.io/rustc-guide/traits/canonicalization.html use infer::canonical::{ Canonical, CanonicalTyVarKind, CanonicalVarInfo, CanonicalVarKind, Canonicalized, @@ -23,7 +23,7 @@ use infer::InferCtxt; use std::sync::atomic::Ordering; use ty::fold::{TypeFoldable, TypeFolder}; use ty::subst::Kind; -use ty::{self, BoundTy, BoundTyIndex, Lift, List, Ty, TyCtxt, TypeFlags}; +use ty::{self, BoundVar, Lift, List, Ty, TyCtxt, TypeFlags}; use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::indexed_vec::Idx; @@ -44,7 +44,7 @@ impl<'cx, 'gcx, 'tcx> InferCtxt<'cx, 'gcx, 'tcx> { /// To get a good understanding of what is happening here, check /// out the [chapter in the rustc guide][c]. /// - /// [c]: https://rust-lang-nursery.github.io/rustc-guide/traits/canonicalization.html#canonicalizing-the-query + /// [c]: https://rust-lang.github.io/rustc-guide/traits/canonicalization.html#canonicalizing-the-query pub fn canonicalize_query( &self, value: &V, @@ -92,7 +92,7 @@ impl<'cx, 'gcx, 'tcx> InferCtxt<'cx, 'gcx, 'tcx> { /// To get a good understanding of what is happening here, check /// out the [chapter in the rustc guide][c]. /// - /// [c]: https://rust-lang-nursery.github.io/rustc-guide/traits/canonicalization.html#canonicalizing-the-query-result + /// [c]: https://rust-lang.github.io/rustc-guide/traits/canonicalization.html#canonicalizing-the-query-result pub fn canonicalize_response(&self, value: &V) -> Canonicalized<'gcx, V> where V: TypeFoldable<'tcx> + Lift<'gcx>, @@ -107,6 +107,20 @@ impl<'cx, 'gcx, 'tcx> InferCtxt<'cx, 'gcx, 'tcx> { ) } + pub fn canonicalize_user_type_annotation(&self, value: &V) -> Canonicalized<'gcx, V> + where + V: TypeFoldable<'tcx> + Lift<'gcx>, + { + let mut query_state = OriginalQueryValues::default(); + Canonicalizer::canonicalize( + value, + Some(self), + self.tcx, + &CanonicalizeUserTypeAnnotation, + &mut query_state, + ) + } + /// A hacky variant of `canonicalize_query` that does not /// canonicalize `'static`. 
Unfortunately, the existing leak /// check treaks `'static` differently in some cases (see also @@ -162,11 +176,26 @@ struct CanonicalizeQueryResponse; impl CanonicalizeRegionMode for CanonicalizeQueryResponse { fn canonicalize_free_region( &self, - _canonicalizer: &mut Canonicalizer<'_, '_, 'tcx>, + canonicalizer: &mut Canonicalizer<'_, '_, 'tcx>, r: ty::Region<'tcx>, ) -> ty::Region<'tcx> { match r { ty::ReFree(_) | ty::ReEmpty | ty::ReErased | ty::ReStatic | ty::ReEarlyBound(..) => r, + ty::RePlaceholder(placeholder) => canonicalizer.canonical_var_for_region( + CanonicalVarInfo { + kind: CanonicalVarKind::PlaceholderRegion(*placeholder), + }, + r, + ), + ty::ReVar(vid) => { + let universe = canonicalizer.region_var_universe(*vid); + canonicalizer.canonical_var_for_region( + CanonicalVarInfo { + kind: CanonicalVarKind::Region(universe), + }, + r, + ) + } _ => { // Other than `'static` or `'empty`, the query // response should be executing in a fully @@ -182,6 +211,29 @@ impl CanonicalizeRegionMode for CanonicalizeQueryResponse { } } +struct CanonicalizeUserTypeAnnotation; + +impl CanonicalizeRegionMode for CanonicalizeUserTypeAnnotation { + fn canonicalize_free_region( + &self, + canonicalizer: &mut Canonicalizer<'_, '_, 'tcx>, + r: ty::Region<'tcx>, + ) -> ty::Region<'tcx> { + match r { + ty::ReEarlyBound(_) | ty::ReFree(_) | ty::ReErased | ty::ReEmpty | ty::ReStatic => r, + ty::ReVar(_) => canonicalizer.canonical_var_for_region_in_root_universe(r), + _ => { + // We only expect region names that the user can type. + bug!("unexpected region in query response: `{:?}`", r) + } + } + } + + fn any(&self) -> bool { + false + } +} + struct CanonicalizeAllFreeRegions; impl CanonicalizeRegionMode for CanonicalizeAllFreeRegions { @@ -190,7 +242,7 @@ impl CanonicalizeRegionMode for CanonicalizeAllFreeRegions { canonicalizer: &mut Canonicalizer<'_, '_, 'tcx>, r: ty::Region<'tcx>, ) -> ty::Region<'tcx> { - canonicalizer.canonical_var_for_region(r) + canonicalizer.canonical_var_for_region_in_root_universe(r) } fn any(&self) -> bool { @@ -209,7 +261,7 @@ impl CanonicalizeRegionMode for CanonicalizeFreeRegionsOtherThanStatic { if let ty::ReStatic = r { r } else { - canonicalizer.canonical_var_for_region(r) + canonicalizer.canonical_var_for_region_in_root_universe(r) } } @@ -225,9 +277,11 @@ struct Canonicalizer<'cx, 'gcx: 'tcx, 'tcx: 'cx> { query_state: &'cx mut OriginalQueryValues<'tcx>, // Note that indices is only used once `var_values` is big enough to be // heap-allocated. - indices: FxHashMap, BoundTyIndex>, + indices: FxHashMap, BoundVar>, canonicalize_region_mode: &'cx dyn CanonicalizeRegionMode, needs_canonical_flags: TypeFlags, + + binder_index: ty::DebruijnIndex, } impl<'cx, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for Canonicalizer<'cx, 'gcx, 'tcx> { @@ -235,11 +289,23 @@ impl<'cx, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for Canonicalizer<'cx, 'gcx, 'tcx> self.tcx } + fn fold_binder(&mut self, t: &ty::Binder) -> ty::Binder + where T: TypeFoldable<'tcx> + { + self.binder_index.shift_in(1); + let t = t.super_fold_with(self); + self.binder_index.shift_out(1); + t + } + fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> { match *r { - ty::ReLateBound(..) => { - // leave bound regions alone - r + ty::ReLateBound(index, ..) 
=> { + if index >= self.binder_index { + bug!("escaping late bound region during canonicalization") + } else { + r + } } ty::ReVar(vid) => { @@ -252,7 +318,8 @@ impl<'cx, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for Canonicalizer<'cx, 'gcx, 'tcx> opportunistically resolved to {:?}", vid, r ); - self.canonical_var_for_region(r) + self.canonicalize_region_mode + .canonicalize_free_region(self, r) } ty::ReStatic @@ -261,21 +328,52 @@ impl<'cx, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for Canonicalizer<'cx, 'gcx, 'tcx> | ty::ReScope(_) | ty::RePlaceholder(..) | ty::ReEmpty - | ty::ReErased => self.canonicalize_region_mode.canonicalize_free_region(self, r), + | ty::ReErased => self.canonicalize_region_mode + .canonicalize_free_region(self, r), - ty::ReClosureBound(..) | ty::ReCanonical(_) => { - bug!("canonical region encountered during canonicalization") + ty::ReClosureBound(..) => { + bug!("closure bound region encountered during canonicalization") } } } fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> { match t.sty { - ty::Infer(ty::TyVar(_)) => self.canonicalize_ty_var(CanonicalTyVarKind::General, t), + ty::Infer(ty::TyVar(vid)) => { + match self.infcx.unwrap().probe_ty_var(vid) { + // `t` could be a float / int variable: canonicalize that instead + Ok(t) => self.fold_ty(t), - ty::Infer(ty::IntVar(_)) => self.canonicalize_ty_var(CanonicalTyVarKind::Int, t), + // `TyVar(vid)` is unresolved, track its universe index in the canonicalized + // result + Err(mut ui) => { + if !self.infcx.unwrap().tcx.sess.opts.debugging_opts.chalk { + // FIXME: perf problem described in #55921. + ui = ty::UniverseIndex::ROOT; + } + self.canonicalize_ty_var( + CanonicalVarInfo { + kind: CanonicalVarKind::Ty(CanonicalTyVarKind::General(ui)) + }, + t + ) + } + } + } - ty::Infer(ty::FloatVar(_)) => self.canonicalize_ty_var(CanonicalTyVarKind::Float, t), + ty::Infer(ty::IntVar(_)) => self.canonicalize_ty_var( + CanonicalVarInfo { + kind: CanonicalVarKind::Ty(CanonicalTyVarKind::Int) + }, + t + ), + + ty::Infer(ty::FloatVar(_)) => self.canonicalize_ty_var( + CanonicalVarInfo { + kind: CanonicalVarKind::Ty(CanonicalTyVarKind::Float) + }, + t + ), ty::Infer(ty::FreshTy(_)) | ty::Infer(ty::FreshIntTy(_)) @@ -283,8 +381,19 @@ impl<'cx, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for Canonicalizer<'cx, 'gcx, 'tcx> bug!("encountered a fresh type during canonicalization") } - ty::Infer(ty::BoundTy(_)) => { - bug!("encountered a canonical type during canonicalization") + ty::Placeholder(placeholder) => self.canonicalize_ty_var( + CanonicalVarInfo { + kind: CanonicalVarKind::PlaceholderTy(placeholder) + }, + t + ), + + ty::Bound(debruijn, _) => { + if debruijn >= self.binder_index { + bug!("escaping bound type during canonicalization") + } else { + t + } } ty::Closure(..) 
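The canonicalizer hunks above switch the representation of canonicalized values: an unresolved inference variable is no longer replaced by `ty::Infer(ty::BoundTy(_))` / `ty::ReCanonical(_)`, but by an ordinary bound variable (`ty::Bound(self.binder_index, var)` for types, `ty::ReLateBound(self.binder_index, BrAnon(var))` for regions), while `canonical_var` records one `CanonicalVarInfo` per replaced variable, now carrying a universe index. The sketch below is a minimal, self-contained model of that replace-and-record step; the `Ty` and `Canonicalizer` definitions are illustrative stand-ins, not rustc's actual types.

```rust
// Toy model of query canonicalization: each unresolved inference variable is
// replaced by a numbered bound variable, and the original variable is kept in
// `var_values` so the query result can later be mapped back. All types here
// are illustrative stand-ins, not rustc's real `Ty`/`Kind`.
#[derive(Clone, Debug, PartialEq)]
enum Ty {
    Bool,
    Infer(u32),   // an unresolved inference variable `?N`
    Bound(usize), // a canonical (bound) variable, numbered by first appearance
    Ref(Box<Ty>), // just enough structure to exercise recursion
}

struct Canonicalizer {
    // original variables, indexed by the bound variable that replaced them
    var_values: Vec<Ty>,
}

impl Canonicalizer {
    fn canonical_var(&mut self, original: Ty) -> usize {
        // reuse an existing bound variable if this one was already seen,
        // mirroring the linear search over `var_values` in the real code
        if let Some(idx) = self.var_values.iter().position(|v| *v == original) {
            idx
        } else {
            self.var_values.push(original);
            self.var_values.len() - 1
        }
    }

    fn fold(&mut self, ty: &Ty) -> Ty {
        match ty {
            Ty::Infer(_) => Ty::Bound(self.canonical_var(ty.clone())),
            Ty::Ref(inner) => Ty::Ref(Box::new(self.fold(inner))),
            Ty::Bool | Ty::Bound(_) => ty.clone(),
        }
    }
}

fn main() {
    let mut c = Canonicalizer { var_values: Vec::new() };
    // [?0, bool, &?0] canonicalizes to [Bound(0), bool, Ref(Bound(0))],
    // with var_values = [?0].
    let value = vec![Ty::Infer(0), Ty::Bool, Ty::Ref(Box::new(Ty::Infer(0)))];
    let canonical: Vec<Ty> = value.iter().map(|t| c.fold(t)).collect();
    println!("canonical value: {:?}", canonical);
    println!("var_values:      {:?}", c.var_values);
}
```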
@@ -335,16 +444,14 @@ impl<'cx, 'gcx, 'tcx> Canonicalizer<'cx, 'gcx, 'tcx> { where V: TypeFoldable<'tcx> + Lift<'gcx>, { - debug_assert!( - !value.has_type_flags(TypeFlags::HAS_CANONICAL_VARS), - "canonicalizing a canonical value: {:?}", - value, - ); - let needs_canonical_flags = if canonicalize_region_mode.any() { - TypeFlags::HAS_FREE_REGIONS | TypeFlags::KEEP_IN_LOCAL_TCX + TypeFlags::KEEP_IN_LOCAL_TCX | + TypeFlags::HAS_FREE_REGIONS | // `HAS_RE_PLACEHOLDER` implies `HAS_FREE_REGIONS` + TypeFlags::HAS_TY_PLACEHOLDER } else { - TypeFlags::KEEP_IN_LOCAL_TCX + TypeFlags::KEEP_IN_LOCAL_TCX | + TypeFlags::HAS_RE_PLACEHOLDER | + TypeFlags::HAS_TY_PLACEHOLDER }; let gcx = tcx.global_tcx(); @@ -353,6 +460,7 @@ impl<'cx, 'gcx, 'tcx> Canonicalizer<'cx, 'gcx, 'tcx> { if !value.has_type_flags(needs_canonical_flags) { let out_value = gcx.lift(value).unwrap(); let canon_value = Canonical { + max_universe: ty::UniverseIndex::ROOT, variables: List::empty(), value: out_value, }; @@ -367,6 +475,7 @@ impl<'cx, 'gcx, 'tcx> Canonicalizer<'cx, 'gcx, 'tcx> { variables: SmallVec::new(), query_state, indices: FxHashMap::default(), + binder_index: ty::INNERMOST, }; let out_value = value.fold_with(&mut canonicalizer); @@ -383,7 +492,14 @@ impl<'cx, 'gcx, 'tcx> Canonicalizer<'cx, 'gcx, 'tcx> { let canonical_variables = tcx.intern_canonical_var_infos(&canonicalizer.variables); + let max_universe = canonical_variables + .iter() + .map(|cvar| cvar.universe()) + .max() + .unwrap_or(ty::UniverseIndex::ROOT); + Canonical { + max_universe, variables: canonical_variables, value: out_value, } @@ -393,7 +509,7 @@ impl<'cx, 'gcx, 'tcx> Canonicalizer<'cx, 'gcx, 'tcx> { /// or returns an existing variable if `kind` has already been /// seen. `kind` is expected to be an unbound variable (or /// potentially a free region). - fn canonical_var(&mut self, info: CanonicalVarInfo, kind: Kind<'tcx>) -> BoundTy { + fn canonical_var(&mut self, info: CanonicalVarInfo, kind: Kind<'tcx>) -> BoundVar { let Canonicalizer { variables, query_state, @@ -413,7 +529,7 @@ impl<'cx, 'gcx, 'tcx> Canonicalizer<'cx, 'gcx, 'tcx> { // direct linear search of `var_values`. if let Some(idx) = var_values.iter().position(|&k| k == kind) { // `kind` is already present in `var_values`. - BoundTyIndex::new(idx) + BoundVar::new(idx) } else { // `kind` isn't present in `var_values`. Append it. Likewise // for `info` and `variables`. @@ -428,11 +544,11 @@ impl<'cx, 'gcx, 'tcx> Canonicalizer<'cx, 'gcx, 'tcx> { *indices = var_values .iter() .enumerate() - .map(|(i, &kind)| (kind, BoundTyIndex::new(i))) + .map(|(i, &kind)| (kind, BoundVar::new(i))) .collect(); } // The cv is the index of the appended element. - BoundTyIndex::new(var_values.len() - 1) + BoundVar::new(var_values.len() - 1) } } else { // `var_values` is large. Do a hashmap search via `indices`. @@ -440,41 +556,73 @@ impl<'cx, 'gcx, 'tcx> Canonicalizer<'cx, 'gcx, 'tcx> { variables.push(info); var_values.push(kind); assert_eq!(variables.len(), var_values.len()); - BoundTyIndex::new(variables.len() - 1) + BoundVar::new(variables.len() - 1) }) }; - BoundTy { - level: ty::INNERMOST, - var, - } + var } - fn canonical_var_for_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> { - let info = CanonicalVarInfo { - kind: CanonicalVarKind::Region, - }; - let b = self.canonical_var(info, r.into()); - debug_assert_eq!(ty::INNERMOST, b.level); - self.tcx().mk_region(ty::ReCanonical(b.var)) + /// Shorthand helper that creates a canonical region variable for + /// `r` (always in the root universe). 
The reason that we always + /// put these variables into the root universe is because this + /// method is used during **query construction:** in that case, we + /// are taking all the regions and just putting them into the most + /// generic context we can. This may generate solutions that don't + /// fit (e.g., that equate some region variable with a placeholder + /// it can't name) on the caller side, but that's ok, the caller + /// can figure that out. In the meantime, it maximizes our + /// caching. + /// + /// (This works because unification never fails -- and hence trait + /// selection is never affected -- due to a universe mismatch.) + fn canonical_var_for_region_in_root_universe( + &mut self, + r: ty::Region<'tcx>, + ) -> ty::Region<'tcx> { + self.canonical_var_for_region( + CanonicalVarInfo { + kind: CanonicalVarKind::Region(ty::UniverseIndex::ROOT), + }, + r, + ) + } + + /// Returns the universe in which `vid` is defined. + fn region_var_universe(&self, vid: ty::RegionVid) -> ty::UniverseIndex { + self.infcx + .unwrap() + .borrow_region_constraints() + .var_universe(vid) + } + + /// Create a canonical variable (with the given `info`) + /// representing the region `r`; return a region referencing it. + fn canonical_var_for_region( + &mut self, + info: CanonicalVarInfo, + r: ty::Region<'tcx>, + ) -> ty::Region<'tcx> { + let var = self.canonical_var(info, r.into()); + let region = ty::ReLateBound( + self.binder_index, + ty::BoundRegion::BrAnon(var.as_u32()) + ); + self.tcx().mk_region(region) } /// Given a type variable `ty_var` of the given kind, first check /// if `ty_var` is bound to anything; if so, canonicalize /// *that*. Otherwise, create a new canonical variable for /// `ty_var`. - fn canonicalize_ty_var(&mut self, ty_kind: CanonicalTyVarKind, ty_var: Ty<'tcx>) -> Ty<'tcx> { + fn canonicalize_ty_var(&mut self, info: CanonicalVarInfo, ty_var: Ty<'tcx>) -> Ty<'tcx> { let infcx = self.infcx.expect("encountered ty-var without infcx"); let bound_to = infcx.shallow_resolve(ty_var); if bound_to != ty_var { self.fold_ty(bound_to) } else { - let info = CanonicalVarInfo { - kind: CanonicalVarKind::Ty(ty_kind), - }; - let b = self.canonical_var(info, ty_var.into()); - debug_assert_eq!(ty::INNERMOST, b.level); - self.tcx().mk_infer(ty::InferTy::BoundTy(b)) + let var = self.canonical_var(info, ty_var.into()); + self.tcx().mk_ty(ty::Bound(self.binder_index, var.into())) } } } diff --git a/src/librustc/infer/canonical/mod.rs b/src/librustc/infer/canonical/mod.rs index e3bd407d17..6b0fa79b20 100644 --- a/src/librustc/infer/canonical/mod.rs +++ b/src/librustc/infer/canonical/mod.rs @@ -20,7 +20,7 @@ //! - a map M (of type `CanonicalVarValues`) from those canonical //! variables back to the original. //! -//! We can then do queries using T2. These will give back constriants +//! We can then do queries using T2. These will give back constraints //! on the canonical variables which can be translated, using the map //! M, into constraints in our source context. This process of //! translating the results back is done by the @@ -29,18 +29,18 @@ //! For a more detailed look at what is happening here, check //! out the [chapter in the rustc guide][c]. //! -//! [c]: https://rust-lang-nursery.github.io/rustc-guide/traits/canonicalization.html +//! 
[c]: https://rust-lang.github.io/rustc-guide/traits/canonicalization.html use infer::{InferCtxt, RegionVariableOrigin, TypeVariableOrigin}; use rustc_data_structures::indexed_vec::IndexVec; -use smallvec::SmallVec; use rustc_data_structures::sync::Lrc; use serialize::UseSpecializedDecodable; +use smallvec::SmallVec; use std::ops::Index; use syntax::source_map::Span; use ty::fold::TypeFoldable; use ty::subst::Kind; -use ty::{self, BoundTyIndex, Lift, Region, List, TyCtxt}; +use ty::{self, BoundVar, Lift, List, Region, TyCtxt}; mod canonicalizer; @@ -53,6 +53,7 @@ mod substitute; /// numbered starting from 0 in order of first appearance. #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, RustcDecodable, RustcEncodable)] pub struct Canonical<'gcx, V> { + pub max_universe: ty::UniverseIndex, pub variables: CanonicalVarInfos<'gcx>, pub value: V, } @@ -72,20 +73,38 @@ impl<'gcx> UseSpecializedDecodable for CanonicalVarInfos<'gcx> {} /// canonicalized query response. #[derive(Clone, Debug, PartialEq, Eq, Hash, RustcDecodable, RustcEncodable)] pub struct CanonicalVarValues<'tcx> { - pub var_values: IndexVec>, + pub var_values: IndexVec>, } /// When we canonicalize a value to form a query, we wind up replacing /// various parts of it with canonical variables. This struct stores /// those replaced bits to remember for when we process the query /// result. -#[derive(Clone, Debug, Default, PartialEq, Eq, Hash, RustcDecodable, RustcEncodable)] +#[derive(Clone, Debug, PartialEq, Eq, Hash, RustcDecodable, RustcEncodable)] pub struct OriginalQueryValues<'tcx> { + /// Map from the universes that appear in the query to the + /// universes in the caller context. For the time being, we only + /// ever put ROOT values into the query, so this map is very + /// simple. + pub universe_map: SmallVec<[ty::UniverseIndex; 4]>, + /// This is equivalent to `CanonicalVarValues`, but using a /// `SmallVec` yields a significant performance win. pub var_values: SmallVec<[Kind<'tcx>; 8]>, } +impl Default for OriginalQueryValues<'tcx> { + fn default() -> Self { + let mut universe_map = SmallVec::default(); + universe_map.push(ty::UniverseIndex::ROOT); + + Self { + universe_map, + var_values: SmallVec::default(), + } + } +} + /// Information about a canonical variable that is included with the /// canonical value. This is sufficient information for code to create /// a copy of the canonical value in some other inference context, @@ -95,6 +114,21 @@ pub struct CanonicalVarInfo { pub kind: CanonicalVarKind, } +impl CanonicalVarInfo { + pub fn universe(&self) -> ty::UniverseIndex { + self.kind.universe() + } + + pub fn is_existential(&self) -> bool { + match self.kind { + CanonicalVarKind::Ty(_) => true, + CanonicalVarKind::PlaceholderTy(_) => false, + CanonicalVarKind::Region(_) => true, + CanonicalVarKind::PlaceholderRegion(..) => false, + } + } +} + /// Describes the "kind" of the canonical variable. This is a "kind" /// in the type-theory sense of the term -- i.e., a "meta" type system /// that analyzes type-like values. @@ -103,8 +137,31 @@ pub enum CanonicalVarKind { /// Some kind of type inference variable. Ty(CanonicalTyVarKind), + /// A "placeholder" that represents "any type". + PlaceholderTy(ty::PlaceholderType), + /// Region variable `'?R`. - Region, + Region(ty::UniverseIndex), + + /// A "placeholder" that represents "any region". Created when you + /// are solving a goal like `for<'a> T: Foo<'a>` to represent the + /// bound region `'a`. 
+ PlaceholderRegion(ty::PlaceholderRegion), +} + +impl CanonicalVarKind { + pub fn universe(self) -> ty::UniverseIndex { + match self { + CanonicalVarKind::Ty(kind) => match kind { + CanonicalTyVarKind::General(ui) => ui, + CanonicalTyVarKind::Float | CanonicalTyVarKind::Int => ty::UniverseIndex::ROOT, + } + + CanonicalVarKind::PlaceholderTy(placeholder) => placeholder.universe, + CanonicalVarKind::Region(ui) => ui, + CanonicalVarKind::PlaceholderRegion(placeholder) => placeholder.universe, + } + } } /// Rust actually has more than one category of type variables; @@ -115,7 +172,7 @@ pub enum CanonicalVarKind { #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, RustcDecodable, RustcEncodable)] pub enum CanonicalTyVarKind { /// General type variable `?T` that can be unified with arbitrary types. - General, + General(ty::UniverseIndex), /// Integral type variable `?I` (that can only be unified with integral types). Int, @@ -220,8 +277,16 @@ impl<'gcx, V> Canonical<'gcx, V> { /// let b: Canonical<'tcx, (T, Ty<'tcx>)> = a.unchecked_map(|v| (v, ty)); /// ``` pub fn unchecked_map(self, map_op: impl FnOnce(V) -> W) -> Canonical<'gcx, W> { - let Canonical { variables, value } = self; - Canonical { variables, value: map_op(value) } + let Canonical { + max_universe, + variables, + value, + } = self; + Canonical { + max_universe, + variables, + value: map_op(value), + } } } @@ -249,41 +314,59 @@ impl<'cx, 'gcx, 'tcx> InferCtxt<'cx, 'gcx, 'tcx> { where T: TypeFoldable<'tcx>, { + // For each universe that is referred to in the incoming + // query, create a universe in our local inference context. In + // practice, as of this writing, all queries have no universes + // in them, so this code has no effect, but it is looking + // forward to the day when we *do* want to carry universes + // through into queries. + let universes: IndexVec = std::iter::once(ty::UniverseIndex::ROOT) + .chain((0..canonical.max_universe.as_u32()).map(|_| self.create_next_universe())) + .collect(); + let canonical_inference_vars = - self.fresh_inference_vars_for_canonical_vars(span, canonical.variables); + self.instantiate_canonical_vars(span, canonical.variables, |ui| universes[ui]); let result = canonical.substitute(self.tcx, &canonical_inference_vars); (result, canonical_inference_vars) } /// Given the "infos" about the canonical variables from some - /// canonical, creates fresh inference variables with the same - /// characteristics. You can then use `substitute` to instantiate - /// the canonical variable with these inference variables. - fn fresh_inference_vars_for_canonical_vars( + /// canonical, creates fresh variables with the same + /// characteristics (see `instantiate_canonical_var` for + /// details). You can then use `substitute` to instantiate the + /// canonical variable with these inference variables. + fn instantiate_canonical_vars( &self, span: Span, variables: &List, + universe_map: impl Fn(ty::UniverseIndex) -> ty::UniverseIndex, ) -> CanonicalVarValues<'tcx> { - let var_values: IndexVec> = variables + let var_values: IndexVec> = variables .iter() - .map(|info| self.fresh_inference_var_for_canonical_var(span, *info)) + .map(|info| self.instantiate_canonical_var(span, *info, &universe_map)) .collect(); CanonicalVarValues { var_values } } /// Given the "info" about a canonical variable, creates a fresh - /// inference variable with the same characteristics. - fn fresh_inference_var_for_canonical_var( + /// variable for it. 
If this is an existentially quantified + /// variable, then you'll get a new inference variable; if it is a + /// universally quantified variable, you get a placeholder. + fn instantiate_canonical_var( &self, span: Span, cv_info: CanonicalVarInfo, + universe_map: impl Fn(ty::UniverseIndex) -> ty::UniverseIndex, ) -> Kind<'tcx> { match cv_info.kind { CanonicalVarKind::Ty(ty_kind) => { let ty = match ty_kind { - CanonicalTyVarKind::General => { - self.next_ty_var(TypeVariableOrigin::MiscVariable(span)) + CanonicalTyVarKind::General(ui) => { + self.next_ty_var_in_universe( + TypeVariableOrigin::MiscVariable(span), + universe_map(ui) + ) } CanonicalTyVarKind::Int => self.tcx.mk_int_var(self.next_int_var_id()), @@ -293,9 +376,28 @@ impl<'cx, 'gcx, 'tcx> InferCtxt<'cx, 'gcx, 'tcx> { ty.into() } - CanonicalVarKind::Region => self - .next_region_var(RegionVariableOrigin::MiscVariable(span)) - .into(), + CanonicalVarKind::PlaceholderTy(ty::PlaceholderType { universe, name }) => { + let universe_mapped = universe_map(universe); + let placeholder_mapped = ty::PlaceholderType { + universe: universe_mapped, + name, + }; + self.tcx.mk_ty(ty::Placeholder(placeholder_mapped)).into() + } + + CanonicalVarKind::Region(ui) => self.next_region_var_in_universe( + RegionVariableOrigin::MiscVariable(span), + universe_map(ui), + ).into(), + + CanonicalVarKind::PlaceholderRegion(ty::PlaceholderRegion { universe, name }) => { + let universe_mapped = universe_map(universe); + let placeholder_mapped = ty::PlaceholderRegion { + universe: universe_mapped, + name, + }; + self.tcx.mk_region(ty::RePlaceholder(placeholder_mapped)).into() + } } } } @@ -314,6 +416,7 @@ CloneTypeFoldableImpls! { BraceStructTypeFoldableImpl! { impl<'tcx, C> TypeFoldable<'tcx> for Canonical<'tcx, C> { + max_universe, variables, value, } where C: TypeFoldable<'tcx> @@ -322,7 +425,7 @@ BraceStructTypeFoldableImpl! { BraceStructLiftImpl! { impl<'a, 'tcx, T> Lift<'tcx> for Canonical<'a, T> { type Lifted = Canonical<'tcx, T::Lifted>; - variables, value + max_universe, variables, value } where T: Lift<'tcx> } @@ -367,10 +470,10 @@ BraceStructLiftImpl! { } where R: Lift<'tcx> } -impl<'tcx> Index for CanonicalVarValues<'tcx> { +impl<'tcx> Index for CanonicalVarValues<'tcx> { type Output = Kind<'tcx>; - fn index(&self, value: BoundTyIndex) -> &Kind<'tcx> { + fn index(&self, value: BoundVar) -> &Kind<'tcx> { &self.var_values[value] } } diff --git a/src/librustc/infer/canonical/query_response.rs b/src/librustc/infer/canonical/query_response.rs index 38788186eb..8d2b1d74c5 100644 --- a/src/librustc/infer/canonical/query_response.rs +++ b/src/librustc/infer/canonical/query_response.rs @@ -15,7 +15,7 @@ //! For an overview of what canonicaliation is and how it fits into //! rustc, check out the [chapter in the rustc guide][c]. //! -//! [c]: https://rust-lang-nursery.github.io/rustc-guide/traits/canonicalization.html +//! [c]: https://rust-lang.github.io/rustc-guide/traits/canonicalization.html use infer::canonical::substitute::substitute_value; use infer::canonical::{ @@ -35,7 +35,7 @@ use traits::{FulfillmentContext, TraitEngine}; use traits::{Obligation, ObligationCause, PredicateObligation}; use ty::fold::TypeFoldable; use ty::subst::{Kind, UnpackedKind}; -use ty::{self, BoundTyIndex, Lift, Ty, TyCtxt}; +use ty::{self, BoundVar, Lift, Ty, TyCtxt}; impl<'cx, 'gcx, 'tcx> InferCtxtBuilder<'cx, 'gcx, 'tcx> { /// The "main method" for a canonicalized trait query. 
Given the @@ -184,7 +184,7 @@ impl<'cx, 'gcx, 'tcx> InferCtxt<'cx, 'gcx, 'tcx> { /// To get a good understanding of what is happening here, check /// out the [chapter in the rustc guide][c]. /// - /// [c]: https://rust-lang-nursery.github.io/rustc-guide/traits/canonicalization.html#processing-the-canonicalized-query-result + /// [c]: https://rust-lang.github.io/rustc-guide/traits/canonicalization.html#processing-the-canonicalized-query-result pub fn instantiate_query_response_and_region_obligations( &self, cause: &ObligationCause<'tcx>, @@ -273,7 +273,7 @@ impl<'cx, 'gcx, 'tcx> InferCtxt<'cx, 'gcx, 'tcx> { for (index, original_value) in original_values.var_values.iter().enumerate() { // ...with the value `v_r` of that variable from the query. let result_value = query_response.substitute_projected(self.tcx, &result_subst, |v| { - &v.var_values[BoundTyIndex::new(index)] + &v.var_values[BoundVar::new(index)] }); match (original_value.unpack(), result_value.unpack()) { (UnpackedKind::Lifetime(ty::ReErased), UnpackedKind::Lifetime(ty::ReErased)) => { @@ -308,11 +308,14 @@ impl<'cx, 'gcx, 'tcx> InferCtxt<'cx, 'gcx, 'tcx> { // ...also include the other query region constraints from the query. output_query_region_constraints.extend( query_response.value.region_constraints.iter().filter_map(|r_c| { - let &ty::OutlivesPredicate(k1, r2) = r_c.skip_binder(); // reconstructed below - let k1 = substitute_value(self.tcx, &result_subst, &k1); - let r2 = substitute_value(self.tcx, &result_subst, &r2); + let r_c = substitute_value(self.tcx, &result_subst, r_c); + + // Screen out `'a: 'a` cases -- we skip the binder here but + // only care the inner values to one another, so they are still at + // consistent binding levels. + let &ty::OutlivesPredicate(k1, r2) = r_c.skip_binder(); if k1 != r2.into() { - Some(ty::Binder::bind(ty::OutlivesPredicate(k1, r2))) + Some(r_c) } else { None } @@ -394,6 +397,21 @@ impl<'cx, 'gcx, 'tcx> InferCtxt<'cx, 'gcx, 'tcx> { original_values, query_response, ); + // For each new universe created in the query result that did + // not appear in the original query, create a local + // superuniverse. + let mut universe_map = original_values.universe_map.clone(); + let num_universes_in_query = original_values.universe_map.len(); + let num_universes_in_response = query_response.max_universe.as_usize() + 1; + for _ in num_universes_in_query..num_universes_in_response { + universe_map.push(self.create_next_universe()); + } + assert!(universe_map.len() >= 1); // always have the root universe + assert_eq!( + universe_map[ty::UniverseIndex::ROOT.as_usize()], + ty::UniverseIndex::ROOT + ); + // Every canonical query result includes values for each of // the inputs to the query. Therefore, we begin by unifying // these values with the original inputs that were @@ -408,7 +426,7 @@ impl<'cx, 'gcx, 'tcx> InferCtxt<'cx, 'gcx, 'tcx> { // is directly equal to one of the canonical variables in the // result, then we can type the corresponding value from the // input. See the example above. - let mut opt_values: IndexVec>> = + let mut opt_values: IndexVec>> = IndexVec::from_elem_n(None, query_response.variables.len()); // In terms of our example above, we are iterating over pairs like: @@ -417,16 +435,22 @@ impl<'cx, 'gcx, 'tcx> InferCtxt<'cx, 'gcx, 'tcx> { match result_value.unpack() { UnpackedKind::Type(result_value) => { // e.g., here `result_value` might be `?0` in the example above... 
- if let ty::Infer(ty::InferTy::BoundTy(b)) = result_value.sty { - // in which case we would set `canonical_vars[0]` to `Some(?U)`. + if let ty::Bound(debruijn, b) = result_value.sty { + // ...in which case we would set `canonical_vars[0]` to `Some(?U)`. + + // We only allow a `ty::INNERMOST` index in substitutions. + assert_eq!(debruijn, ty::INNERMOST); opt_values[b.var] = Some(*original_value); } } UnpackedKind::Lifetime(result_value) => { // e.g., here `result_value` might be `'?1` in the example above... - if let &ty::RegionKind::ReCanonical(index) = result_value { - // in which case we would set `canonical_vars[0]` to `Some('static)`. - opt_values[index] = Some(*original_value); + if let &ty::RegionKind::ReLateBound(debruijn, br) = result_value { + // ... in which case we would set `canonical_vars[0]` to `Some('static)`. + + // We only allow a `ty::INNERMOST` index in substitutions. + assert_eq!(debruijn, ty::INNERMOST); + opt_values[br.assert_bound_var()] = Some(*original_value); } } } @@ -440,9 +464,20 @@ impl<'cx, 'gcx, 'tcx> InferCtxt<'cx, 'gcx, 'tcx> { .variables .iter() .enumerate() - .map(|(index, info)| opt_values[BoundTyIndex::new(index)].unwrap_or_else(|| - self.fresh_inference_var_for_canonical_var(cause.span, *info) - )) + .map(|(index, info)| { + if info.is_existential() { + match opt_values[BoundVar::new(index)] { + Some(k) => k, + None => self.instantiate_canonical_var(cause.span, *info, |u| { + universe_map[u.as_usize()] + }), + } + } else { + self.instantiate_canonical_var(cause.span, *info, |u| { + universe_map[u.as_usize()] + }) + } + }) .collect(), }; @@ -470,7 +505,7 @@ impl<'cx, 'gcx, 'tcx> InferCtxt<'cx, 'gcx, 'tcx> { // canonical variable; this is taken from // `query_response.var_values` after applying the substitution // `result_subst`. - let substituted_query_response = |index: BoundTyIndex| -> Kind<'tcx> { + let substituted_query_response = |index: BoundVar| -> Kind<'tcx> { query_response.substitute_projected(self.tcx, &result_subst, |v| &v.var_values[index]) }; @@ -497,22 +532,23 @@ impl<'cx, 'gcx, 'tcx> InferCtxt<'cx, 'gcx, 'tcx> { unsubstituted_region_constraints .iter() .map(move |constraint| { - let ty::OutlivesPredicate(k1, r2) = constraint.skip_binder(); // restored below - let k1 = substitute_value(self.tcx, result_subst, k1); - let r2 = substitute_value(self.tcx, result_subst, r2); + let constraint = substitute_value(self.tcx, result_subst, constraint); + let &ty::OutlivesPredicate(k1, r2) = constraint.skip_binder(); // restored below Obligation::new( cause.clone(), param_env, match k1.unpack() { UnpackedKind::Lifetime(r1) => ty::Predicate::RegionOutlives( - ty::Binder::dummy( + ty::Binder::bind( ty::OutlivesPredicate(r1, r2) - )), + ) + ), UnpackedKind::Type(t1) => ty::Predicate::TypeOutlives( - ty::Binder::dummy(ty::OutlivesPredicate( - t1, r2 - ))) + ty::Binder::bind( + ty::OutlivesPredicate(t1, r2) + ) + ), } ) }) @@ -520,18 +556,18 @@ impl<'cx, 'gcx, 'tcx> InferCtxt<'cx, 'gcx, 'tcx> { } /// Given two sets of values for the same set of canonical variables, unify them. - /// The second set is produced lazilly by supplying indices from the first set. + /// The second set is produced lazily by supplying indices from the first set. 
fn unify_canonical_vars( &self, cause: &ObligationCause<'tcx>, param_env: ty::ParamEnv<'tcx>, variables1: &OriginalQueryValues<'tcx>, - variables2: impl Fn(BoundTyIndex) -> Kind<'tcx>, + variables2: impl Fn(BoundVar) -> Kind<'tcx>, ) -> InferResult<'tcx, ()> { self.commit_if_ok(|_| { let mut obligations = vec![]; for (index, value1) in variables1.var_values.iter().enumerate() { - let value2 = variables2(BoundTyIndex::new(index)); + let value2 = variables2(BoundVar::new(index)); match (value1.unpack(), value2.unpack()) { (UnpackedKind::Type(v1), UnpackedKind::Type(v2)) => { @@ -594,11 +630,11 @@ pub fn make_query_outlives<'tcx>( } Constraint::RegSubReg(r1, r2) => ty::OutlivesPredicate(r2.into(), r1), }) - .map(ty::Binder::dummy) // no bound regions in the code above + .map(ty::Binder::dummy) // no bound vars in the code above .chain( outlives_obligations .map(|(ty, r)| ty::OutlivesPredicate(ty.into(), r)) - .map(ty::Binder::dummy), // no bound regions in the code above + .map(ty::Binder::dummy) // no bound vars in the code above ) .collect(); diff --git a/src/librustc/infer/canonical/substitute.rs b/src/librustc/infer/canonical/substitute.rs index 03441c3dee..ab575882f8 100644 --- a/src/librustc/infer/canonical/substitute.rs +++ b/src/librustc/infer/canonical/substitute.rs @@ -11,15 +11,15 @@ //! This module contains code to substitute new values into a //! `Canonical<'tcx, T>`. //! -//! For an overview of what canonicaliation is and how it fits into +//! For an overview of what canonicalization is and how it fits into //! rustc, check out the [chapter in the rustc guide][c]. //! -//! [c]: https://rust-lang-nursery.github.io/rustc-guide/traits/canonicalization.html +//! [c]: https://rust-lang.github.io/rustc-guide/traits/canonicalization.html use infer::canonical::{Canonical, CanonicalVarValues}; -use ty::fold::{TypeFoldable, TypeFolder}; +use ty::fold::TypeFoldable; use ty::subst::UnpackedKind; -use ty::{self, Ty, TyCtxt, TypeFlags}; +use ty::{self, TyCtxt}; impl<'tcx, V> Canonical<'tcx, V> { /// Instantiate the wrapped value, replacing each canonical value @@ -64,51 +64,22 @@ where T: TypeFoldable<'tcx>, { if var_values.var_values.is_empty() { - debug_assert!(!value.has_type_flags(TypeFlags::HAS_CANONICAL_VARS)); - value.clone() - } else if !value.has_type_flags(TypeFlags::HAS_CANONICAL_VARS) { value.clone() } else { - value.fold_with(&mut CanonicalVarValuesSubst { tcx, var_values }) - } -} - -struct CanonicalVarValuesSubst<'cx, 'gcx: 'tcx, 'tcx: 'cx> { - tcx: TyCtxt<'cx, 'gcx, 'tcx>, - var_values: &'cx CanonicalVarValues<'tcx>, -} - -impl<'cx, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for CanonicalVarValuesSubst<'cx, 'gcx, 'tcx> { - fn tcx(&self) -> TyCtxt<'_, 'gcx, 'tcx> { - self.tcx - } - - fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> { - match t.sty { - ty::Infer(ty::InferTy::BoundTy(b)) => { - debug_assert_eq!(ty::INNERMOST, b.level); - match self.var_values.var_values[b.var].unpack() { - UnpackedKind::Type(ty) => ty, - r => bug!("{:?} is a type but value is {:?}", b, r), - } - } - _ => { - if !t.has_type_flags(TypeFlags::HAS_CANONICAL_VARS) { - t - } else { - t.super_fold_with(self) - } - } - } - } - - fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> { - match r { - ty::RegionKind::ReCanonical(c) => match self.var_values.var_values[*c].unpack() { + let fld_r = |br: ty::BoundRegion| { + match var_values.var_values[br.assert_bound_var()].unpack() { UnpackedKind::Lifetime(l) => l, - r => bug!("{:?} is a region but value is {:?}", c, r), - }, - _ => r.super_fold_with(self), 
- } + r => bug!("{:?} is a region but value is {:?}", br, r), + } + }; + + let fld_t = |bound_ty: ty::BoundTy| { + match var_values.var_values[bound_ty.var].unpack() { + UnpackedKind::Type(ty) => ty, + r => bug!("{:?} is a type but value is {:?}", bound_ty, r), + } + }; + + tcx.replace_escaping_bound_vars(value, fld_r, fld_t).0 } } diff --git a/src/librustc/infer/combine.rs b/src/librustc/infer/combine.rs index 0ee03bc4c6..f13210926a 100644 --- a/src/librustc/infer/combine.rs +++ b/src/librustc/infer/combine.rs @@ -485,7 +485,6 @@ impl<'cx, 'gcx, 'tcx> TypeRelation<'cx, 'gcx, 'tcx> for Generalizer<'cx, 'gcx, ' } } - ty::ReCanonical(..) | ty::ReClosureBound(..) => { span_bug!( self.span, diff --git a/src/librustc/infer/error_reporting/mod.rs b/src/librustc/infer/error_reporting/mod.rs index d19c495af3..59a490f4a0 100644 --- a/src/librustc/infer/error_reporting/mod.rs +++ b/src/librustc/infer/error_reporting/mod.rs @@ -152,7 +152,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { } // We shouldn't encounter an error message with ReClosureBound. - ty::ReCanonical(..) | ty::ReClosureBound(..) => { + ty::ReClosureBound(..) => { bug!("encountered unexpected ReClosureBound: {:?}", region,); } }; @@ -178,6 +178,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { self.msg_span_from_early_bound_and_free_regions(region) } ty::ReStatic => ("the static lifetime".to_owned(), None), + ty::ReEmpty => ("an empty lifetime".to_owned(), None), _ => bug!("{:?}", region), } } @@ -1314,7 +1315,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { format!(" for lifetime parameter `{}` in coherence check", name) } infer::UpvarRegion(ref upvar_id, _) => { - let var_node_id = self.tcx.hir.hir_to_node_id(upvar_id.var_id); + let var_node_id = self.tcx.hir.hir_to_node_id(upvar_id.var_path.hir_id); let var_name = self.tcx.hir.name(var_node_id); format!(" for capture of `{}` by closure", var_name) } diff --git a/src/librustc/infer/error_reporting/nice_region_error/outlives_closure.rs b/src/librustc/infer/error_reporting/nice_region_error/outlives_closure.rs index 009a823568..7a92b3084b 100644 --- a/src/librustc/infer/error_reporting/nice_region_error/outlives_closure.rs +++ b/src/librustc/infer/error_reporting/nice_region_error/outlives_closure.rs @@ -20,7 +20,7 @@ use util::common::ErrorReported; use infer::lexical_region_resolve::RegionResolutionError::SubSupConflict; impl<'a, 'gcx, 'tcx> NiceRegionError<'a, 'gcx, 'tcx> { - /// Print the error message for lifetime errors when binding excapes a closure. + /// Print the error message for lifetime errors when binding escapes a closure. 
/// /// Consider a case where we have /// diff --git a/src/librustc/infer/error_reporting/note.rs b/src/librustc/infer/error_reporting/note.rs index 54d01a035a..a539c321af 100644 --- a/src/librustc/infer/error_reporting/note.rs +++ b/src/librustc/infer/error_reporting/note.rs @@ -41,7 +41,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { "...so that reference does not outlive borrowed content"); } infer::ReborrowUpvar(span, ref upvar_id) => { - let var_node_id = self.tcx.hir.hir_to_node_id(upvar_id.var_id); + let var_node_id = self.tcx.hir.hir_to_node_id(upvar_id.var_path.hir_id); let var_name = self.tcx.hir.name(var_node_id); err.span_note(span, &format!("...so that closure can access `{}`", var_name)); @@ -174,7 +174,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { err } infer::ReborrowUpvar(span, ref upvar_id) => { - let var_node_id = self.tcx.hir.hir_to_node_id(upvar_id.var_id); + let var_node_id = self.tcx.hir.hir_to_node_id(upvar_id.var_path.hir_id); let var_name = self.tcx.hir.name(var_node_id); let mut err = struct_span_err!(self.tcx.sess, span, diff --git a/src/librustc/infer/freshen.rs b/src/librustc/infer/freshen.rs index 1647f259db..d17cf0c7b4 100644 --- a/src/librustc/infer/freshen.rs +++ b/src/librustc/infer/freshen.rs @@ -114,7 +114,6 @@ impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for TypeFreshener<'a, 'gcx, 'tcx> { self.tcx().types.re_erased } - ty::ReCanonical(..) | ty::ReClosureBound(..) => { bug!( "encountered unexpected region: {:?}", @@ -171,9 +170,6 @@ impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for TypeFreshener<'a, 'gcx, 'tcx> { t } - ty::Infer(ty::BoundTy(..)) => - bug!("encountered canonical ty during freshening"), - ty::Generator(..) | ty::Bool | ty::Char | @@ -201,6 +197,9 @@ impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for TypeFreshener<'a, 'gcx, 'tcx> { ty::Opaque(..) => { t.super_fold_with(self) } + + ty::Placeholder(..) | + ty::Bound(..) => bug!("unexpected type {:?}", t), } } } diff --git a/src/librustc/infer/glb.rs b/src/librustc/infer/glb.rs index fd14e0e40e..8968c5949b 100644 --- a/src/librustc/infer/glb.rs +++ b/src/librustc/infer/glb.rs @@ -15,7 +15,6 @@ use super::Subtype; use traits::ObligationCause; use ty::{self, Ty, TyCtxt}; -use ty::error::TypeError; use ty::relate::{Relate, RelateResult, TypeRelation}; /// "Greatest lower bound" (common subtype) @@ -76,31 +75,12 @@ impl<'combine, 'infcx, 'gcx, 'tcx> TypeRelation<'infcx, 'gcx, 'tcx> where T: Relate<'tcx> { debug!("binders(a={:?}, b={:?})", a, b); - let was_error = self.infcx().probe(|_snapshot| { - // Subtle: use a fresh combine-fields here because we recover - // from Err. Doing otherwise could propagate obligations out - // through our `self.obligations` field. - self.infcx() - .combine_fields(self.fields.trace.clone(), self.fields.param_env) - .higher_ranked_glb(a, b, self.a_is_expected) - .is_err() - }); - debug!("binders: was_error={:?}", was_error); // When higher-ranked types are involved, computing the LUB is // very challenging, switch to invariance. This is obviously // overly conservative but works ok in practice. 
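The comment above captures the key change to `glb.rs` (and, further below, to `lub.rs`): instead of computing a real least upper/greatest lower bound for binders, the two sides are related invariantly and the left-hand binder is handed back unchanged, with errors propagated directly rather than wrapped in `OldStyleLUB`. A minimal sketch of that shape, using toy stand-ins rather than rustc's real `Binder`/`TypeRelation` machinery:

```
// Toy stand-ins; not rustc's real Binder/TypeRelation machinery.
#[derive(Debug, Clone, PartialEq)]
struct Binder<T>(T);

fn lub_via_invariance<T>(a: &Binder<T>, b: &Binder<T>) -> Result<Binder<T>, String>
where
    T: Clone + PartialEq + std::fmt::Debug,
{
    // "Relate with invariance": both binders must match exactly...
    if a != b {
        return Err(format!("no common bound computed: {:?} vs {:?}", a, b));
    }
    // ...and on success the left-hand side is returned unchanged.
    Ok(a.clone())
}

fn main() {
    let a = Binder("for<'x> fn(&'x u32)");
    let b = Binder("for<'x> fn(&'x u32)");
    assert_eq!(lub_via_invariance(&a, &b), Ok(a.clone()));
}
```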
- match self.relate_with_variance(ty::Variance::Invariant, a, b) { - Ok(_) => Ok(a.clone()), - Err(err) => { - debug!("binders: error occurred, was_error={:?}", was_error); - if !was_error { - Err(TypeError::OldStyleLUB(Box::new(err))) - } else { - Err(err) - } - } - } + self.relate_with_variance(ty::Variance::Invariant, a, b)?; + Ok(a.clone()) } } diff --git a/src/librustc/infer/higher_ranked/mod.rs b/src/librustc/infer/higher_ranked/mod.rs index d85a3e84f8..cf91b85807 100644 --- a/src/librustc/infer/higher_ranked/mod.rs +++ b/src/librustc/infer/higher_ranked/mod.rs @@ -22,7 +22,6 @@ use super::region_constraints::{TaintDirections}; use ty::{self, TyCtxt, Binder, TypeFoldable}; use ty::error::TypeError; use ty::relate::{Relate, RelateResult, TypeRelation}; -use std::collections::BTreeMap; use syntax_pos::Span; use util::nodemap::{FxHashMap, FxHashSet}; @@ -54,17 +53,17 @@ impl<'a, 'gcx, 'tcx> CombineFields<'a, 'gcx, 'tcx> { // First, we instantiate each bound region in the supertype with a // fresh placeholder region. let (b_prime, placeholder_map) = - self.infcx.replace_late_bound_regions_with_placeholders(b); + self.infcx.replace_bound_vars_with_placeholders(b); // Next, we instantiate each bound region in the subtype // with a fresh region variable. These region variables -- // but no other pre-existing region variables -- can name // the placeholders. - let (a_prime, _) = - self.infcx.replace_late_bound_regions_with_fresh_var( - span, - HigherRankedType, - a); + let (a_prime, _) = self.infcx.replace_bound_vars_with_fresh_vars( + span, + HigherRankedType, + a + ); debug!("a_prime={:?}", a_prime); debug!("b_prime={:?}", b_prime); @@ -116,7 +115,7 @@ impl<'a, 'gcx, 'tcx> CombineFields<'a, 'gcx, 'tcx> { // First, we instantiate each bound region in the matcher // with a placeholder region. let ((a_match, a_value), placeholder_map) = - self.infcx.replace_late_bound_regions_with_placeholders(a_pair); + self.infcx.replace_bound_vars_with_placeholders(a_pair); debug!("higher_ranked_match: a_match={:?}", a_match); debug!("higher_ranked_match: placeholder_map={:?}", placeholder_map); @@ -202,261 +201,6 @@ impl<'a, 'gcx, 'tcx> CombineFields<'a, 'gcx, 'tcx> { Ok(HrMatchResult { value: a_value }) }); } - - pub fn higher_ranked_lub(&mut self, a: &Binder, b: &Binder, a_is_expected: bool) - -> RelateResult<'tcx, Binder> - where T: Relate<'tcx> - { - // Start a snapshot so we can examine "all bindings that were - // created as part of this type comparison". - return self.infcx.commit_if_ok(|snapshot| { - // Instantiate each bound region with a fresh region variable. - let span = self.trace.cause.span; - let (a_with_fresh, a_map) = - self.infcx.replace_late_bound_regions_with_fresh_var( - span, HigherRankedType, a); - let (b_with_fresh, _) = - self.infcx.replace_late_bound_regions_with_fresh_var( - span, HigherRankedType, b); - - // Collect constraints. 
- let result0 = - self.lub(a_is_expected).relate(&a_with_fresh, &b_with_fresh)?; - let result0 = - self.infcx.resolve_type_vars_if_possible(&result0); - debug!("lub result0 = {:?}", result0); - - // Generalize the regions appearing in result0 if possible - let new_vars = self.infcx.region_vars_confined_to_snapshot(snapshot); - let span = self.trace.cause.span; - let result1 = - fold_regions_in( - self.tcx(), - &result0, - |r, debruijn| generalize_region(self.infcx, span, snapshot, debruijn, - &new_vars, &a_map, r)); - - debug!("lub({:?},{:?}) = {:?}", - a, - b, - result1); - - Ok(ty::Binder::bind(result1)) - }); - - fn generalize_region<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>, - span: Span, - snapshot: &CombinedSnapshot<'a, 'tcx>, - debruijn: ty::DebruijnIndex, - new_vars: &[ty::RegionVid], - a_map: &BTreeMap>, - r0: ty::Region<'tcx>) - -> ty::Region<'tcx> { - // Regions that pre-dated the LUB computation stay as they are. - if !is_var_in_set(new_vars, r0) { - assert!(!r0.is_late_bound()); - debug!("generalize_region(r0={:?}): not new variable", r0); - return r0; - } - - let tainted = infcx.tainted_regions(snapshot, r0, TaintDirections::both()); - - // Variables created during LUB computation which are - // *related* to regions that pre-date the LUB computation - // stay as they are. - if !tainted.iter().all(|&r| is_var_in_set(new_vars, r)) { - debug!("generalize_region(r0={:?}): \ - non-new-variables found in {:?}", - r0, tainted); - assert!(!r0.is_late_bound()); - return r0; - } - - // Otherwise, the variable must be associated with at - // least one of the variables representing bound regions - // in both A and B. Replace the variable with the "first" - // bound region from A that we find it to be associated - // with. - for (a_br, a_r) in a_map { - if tainted.iter().any(|x| x == a_r) { - debug!("generalize_region(r0={:?}): \ - replacing with {:?}, tainted={:?}", - r0, *a_br, tainted); - return infcx.tcx.mk_region(ty::ReLateBound(debruijn, *a_br)); - } - } - - span_bug!( - span, - "region {:?} is not associated with any bound region from A!", - r0) - } - } - - pub fn higher_ranked_glb(&mut self, a: &Binder, b: &Binder, a_is_expected: bool) - -> RelateResult<'tcx, Binder> - where T: Relate<'tcx> - { - debug!("higher_ranked_glb({:?}, {:?})", - a, b); - - // Make a snapshot so we can examine "all bindings that were - // created as part of this type comparison". - return self.infcx.commit_if_ok(|snapshot| { - // Instantiate each bound region with a fresh region variable. - let (a_with_fresh, a_map) = - self.infcx.replace_late_bound_regions_with_fresh_var( - self.trace.cause.span, HigherRankedType, a); - let (b_with_fresh, b_map) = - self.infcx.replace_late_bound_regions_with_fresh_var( - self.trace.cause.span, HigherRankedType, b); - let a_vars = var_ids(self, &a_map); - let b_vars = var_ids(self, &b_map); - - // Collect constraints. 
- let result0 = - self.glb(a_is_expected).relate(&a_with_fresh, &b_with_fresh)?; - let result0 = - self.infcx.resolve_type_vars_if_possible(&result0); - debug!("glb result0 = {:?}", result0); - - // Generalize the regions appearing in result0 if possible - let new_vars = self.infcx.region_vars_confined_to_snapshot(snapshot); - let span = self.trace.cause.span; - let result1 = - fold_regions_in( - self.tcx(), - &result0, - |r, debruijn| generalize_region(self.infcx, span, snapshot, debruijn, - &new_vars, - &a_map, &a_vars, &b_vars, - r)); - - debug!("glb({:?},{:?}) = {:?}", - a, - b, - result1); - - Ok(ty::Binder::bind(result1)) - }); - - fn generalize_region<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>, - span: Span, - snapshot: &CombinedSnapshot<'a, 'tcx>, - debruijn: ty::DebruijnIndex, - new_vars: &[ty::RegionVid], - a_map: &BTreeMap>, - a_vars: &[ty::RegionVid], - b_vars: &[ty::RegionVid], - r0: ty::Region<'tcx>) - -> ty::Region<'tcx> { - if !is_var_in_set(new_vars, r0) { - assert!(!r0.is_late_bound()); - return r0; - } - - let tainted = infcx.tainted_regions(snapshot, r0, TaintDirections::both()); - - let mut a_r = None; - let mut b_r = None; - let mut only_new_vars = true; - for r in &tainted { - if is_var_in_set(a_vars, *r) { - if a_r.is_some() { - return fresh_bound_variable(infcx, debruijn); - } else { - a_r = Some(*r); - } - } else if is_var_in_set(b_vars, *r) { - if b_r.is_some() { - return fresh_bound_variable(infcx, debruijn); - } else { - b_r = Some(*r); - } - } else if !is_var_in_set(new_vars, *r) { - only_new_vars = false; - } - } - - // NB---I do not believe this algorithm computes - // (necessarily) the GLB. As written it can - // spuriously fail. In particular, if there is a case - // like: |fn(&a)| and fn(fn(&b)), where a and b are - // free, it will return fn(&c) where c = GLB(a,b). If - // however this GLB is not defined, then the result is - // an error, even though something like - // "fn(fn(&X))" where X is bound would be a - // subtype of both of those. - // - // The problem is that if we were to return a bound - // variable, we'd be computing a lower-bound, but not - // necessarily the *greatest* lower-bound. - // - // Unfortunately, this problem is non-trivial to solve, - // because we do not know at the time of computing the GLB - // whether a GLB(a,b) exists or not, because we haven't - // run region inference (or indeed, even fully computed - // the region hierarchy!). The current algorithm seems to - // works ok in practice. 
- - if a_r.is_some() && b_r.is_some() && only_new_vars { - // Related to exactly one bound variable from each fn: - return rev_lookup(infcx, span, a_map, a_r.unwrap()); - } else if a_r.is_none() && b_r.is_none() { - // Not related to bound variables from either fn: - assert!(!r0.is_late_bound()); - return r0; - } else { - // Other: - return fresh_bound_variable(infcx, debruijn); - } - } - - fn rev_lookup<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>, - span: Span, - a_map: &BTreeMap>, - r: ty::Region<'tcx>) -> ty::Region<'tcx> - { - for (a_br, a_r) in a_map { - if *a_r == r { - return infcx.tcx.mk_region(ty::ReLateBound(ty::INNERMOST, *a_br)); - } - } - span_bug!( - span, - "could not find original bound region for {:?}", - r); - } - - fn fresh_bound_variable<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>, - debruijn: ty::DebruijnIndex) - -> ty::Region<'tcx> { - infcx.borrow_region_constraints().new_bound(infcx.tcx, debruijn) - } - } -} - -fn var_ids<'a, 'gcx, 'tcx>(fields: &CombineFields<'a, 'gcx, 'tcx>, - map: &BTreeMap>) - -> Vec { - map.iter() - .map(|(_, &r)| match *r { - ty::ReVar(r) => { r } - _ => { - span_bug!( - fields.trace.cause.span, - "found non-region-vid: {:?}", - r); - } - }) - .collect() -} - -fn is_var_in_set(new_vars: &[ty::RegionVid], r: ty::Region<'_>) -> bool { - match *r { - ty::ReVar(ref v) => new_vars.iter().any(|x| x == v), - _ => false - } } fn fold_regions_in<'a, 'gcx, 'tcx, T, F>(tcx: TyCtxt<'a, 'gcx, 'tcx>, @@ -570,10 +314,10 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { region_vars } - /// Replace all regions bound by `binder` with placeholder regions and - /// return a map indicating which bound-region was replaced with what - /// placeholder region. This is the first step of checking subtyping - /// when higher-ranked things are involved. + /// Replace all regions (resp. types) bound by `binder` with placeholder + /// regions (resp. types) and return a map indicating which bound-region + /// was replaced with what placeholder region. This is the first step of + /// checking subtyping when higher-ranked things are involved. /// /// **Important:** you must call this function from within a snapshot. /// Moreover, before committing the snapshot, you must eventually call @@ -585,27 +329,38 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { /// For more information about how placeholders and HRTBs work, see /// the [rustc guide]. 
/// - /// [rustc guide]: https://rust-lang-nursery.github.io/rustc-guide/traits/hrtb.html - pub fn replace_late_bound_regions_with_placeholders( + /// [rustc guide]: https://rust-lang.github.io/rustc-guide/traits/hrtb.html + pub fn replace_bound_vars_with_placeholders( &self, - binder: &ty::Binder, + binder: &ty::Binder ) -> (T, PlaceholderMap<'tcx>) where - T : TypeFoldable<'tcx>, + T: TypeFoldable<'tcx> { let next_universe = self.create_next_universe(); - let (result, map) = self.tcx.replace_late_bound_regions(binder, |br| { - self.tcx.mk_region(ty::RePlaceholder(ty::Placeholder { + let fld_r = |br| { + self.tcx.mk_region(ty::RePlaceholder(ty::PlaceholderRegion { universe: next_universe, name: br, })) - }); + }; - debug!("replace_late_bound_regions_with_placeholders(binder={:?}, result={:?}, map={:?})", - binder, - result, - map); + let fld_t = |bound_ty: ty::BoundTy| { + self.tcx.mk_ty(ty::Placeholder(ty::PlaceholderType { + universe: next_universe, + name: bound_ty.var, + })) + }; + + let (result, map) = self.tcx.replace_bound_vars(binder, fld_r, fld_t); + + debug!( + "replace_bound_vars_with_placeholders(binder={:?}, result={:?}, map={:?})", + binder, + result, + map + ); (result, map) } @@ -684,7 +439,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { /// /// This routine is only intended to be used when the leak-check has /// passed; currently, it's used in the trait matching code to create - /// a set of nested obligations frmo an impl that matches against + /// a set of nested obligations from an impl that matches against /// something higher-ranked. More details can be found in /// `librustc/middle/traits/README.md`. /// @@ -786,7 +541,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { /// Pops the placeholder regions found in `placeholder_map` from the region /// inference context. Whenever you create placeholder regions via - /// `replace_late_bound_regions_with_placeholders`, they must be popped before you + /// `replace_bound_vars_with_placeholders`, they must be popped before you /// commit the enclosing snapshot (if you do not commit, e.g. within a /// probe or as a result of an error, then this is not necessary, as /// popping happens as part of the rollback). @@ -799,11 +554,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { ) { debug!("pop_placeholders({:?})", placeholder_map); let placeholder_regions: FxHashSet<_> = placeholder_map.values().cloned().collect(); - self.borrow_region_constraints() - .pop_placeholders( - &placeholder_regions, - &snapshot.region_constraints_snapshot, - ); + self.borrow_region_constraints().pop_placeholders(&placeholder_regions); self.universe.set(snapshot.universe); if !placeholder_map.is_empty() { self.projection_cache.borrow_mut().rollback_placeholder( diff --git a/src/librustc/infer/lexical_region_resolve/README.md b/src/librustc/infer/lexical_region_resolve/README.md index 6e1c419117..4483e522f3 100644 --- a/src/librustc/infer/lexical_region_resolve/README.md +++ b/src/librustc/infer/lexical_region_resolve/README.md @@ -3,7 +3,7 @@ > WARNING: This README is obsolete and will be removed soon! For > more info on how the current borrowck works, see the [rustc guide]. 
-[rustc guide]: https://rust-lang-nursery.github.io/rustc-guide/mir/borrowck.html +[rustc guide]: https://rust-lang.github.io/rustc-guide/mir/borrowck.html ## Terminology diff --git a/src/librustc/infer/lexical_region_resolve/graphviz.rs b/src/librustc/infer/lexical_region_resolve/graphviz.rs index 3b92d032bd..a210d63f12 100644 --- a/src/librustc/infer/lexical_region_resolve/graphviz.rs +++ b/src/librustc/infer/lexical_region_resolve/graphviz.rs @@ -230,10 +230,7 @@ impl<'a, 'gcx, 'tcx> dot::GraphWalk<'a> for ConstraintGraph<'a, 'gcx, 'tcx> { type Node = Node; type Edge = Edge<'tcx>; fn nodes(&self) -> dot::Nodes<'_, Node> { - let mut set = FxHashSet::default(); - for node in self.node_ids.keys() { - set.insert(*node); - } + let set = self.node_ids.keys().cloned().collect::>(); debug!("constraint graph has {} nodes", set.len()); set.into_iter().collect() } diff --git a/src/librustc/infer/lexical_region_resolve/mod.rs b/src/librustc/infer/lexical_region_resolve/mod.rs index 8f28e9a320..75f503d3bc 100644 --- a/src/librustc/infer/lexical_region_resolve/mod.rs +++ b/src/librustc/infer/lexical_region_resolve/mod.rs @@ -260,9 +260,7 @@ impl<'cx, 'gcx, 'tcx> LexicalResolver<'cx, 'gcx, 'tcx> { fn lub_concrete_regions(&self, a: Region<'tcx>, b: Region<'tcx>) -> Region<'tcx> { let tcx = self.tcx(); match (a, b) { - (&ty::ReCanonical(..), _) - | (_, &ty::ReCanonical(..)) - | (&ty::ReClosureBound(..), _) + (&ty::ReClosureBound(..), _) | (_, &ty::ReClosureBound(..)) | (&ReLateBound(..), _) | (_, &ReLateBound(..)) diff --git a/src/librustc/infer/lub.rs b/src/librustc/infer/lub.rs index 55c7eef607..8875b4169d 100644 --- a/src/librustc/infer/lub.rs +++ b/src/librustc/infer/lub.rs @@ -15,7 +15,6 @@ use super::Subtype; use traits::ObligationCause; use ty::{self, Ty, TyCtxt}; -use ty::error::TypeError; use ty::relate::{Relate, RelateResult, TypeRelation}; /// "Least upper bound" (common supertype) @@ -76,31 +75,12 @@ impl<'combine, 'infcx, 'gcx, 'tcx> TypeRelation<'infcx, 'gcx, 'tcx> where T: Relate<'tcx> { debug!("binders(a={:?}, b={:?})", a, b); - let was_error = self.infcx().probe(|_snapshot| { - // Subtle: use a fresh combine-fields here because we recover - // from Err. Doing otherwise could propagate obligations out - // through our `self.obligations` field. - self.infcx() - .combine_fields(self.fields.trace.clone(), self.fields.param_env) - .higher_ranked_lub(a, b, self.a_is_expected) - .is_err() - }); - debug!("binders: was_error={:?}", was_error); // When higher-ranked types are involved, computing the LUB is // very challenging, switch to invariance. This is obviously // overly conservative but works ok in practice. - match self.relate_with_variance(ty::Variance::Invariant, a, b) { - Ok(_) => Ok(a.clone()), - Err(err) => { - debug!("binders: error occurred, was_error={:?}", was_error); - if !was_error { - Err(TypeError::OldStyleLUB(Box::new(err))) - } else { - Err(err) - } - } - } + self.relate_with_variance(ty::Variance::Invariant, a, b)?; + Ok(a.clone()) } } diff --git a/src/librustc/infer/mod.rs b/src/librustc/infer/mod.rs index f5513acecf..d8beae45b0 100644 --- a/src/librustc/infer/mod.rs +++ b/src/librustc/infer/mod.rs @@ -227,7 +227,7 @@ pub struct InferCtxt<'a, 'gcx: 'a + 'tcx, 'tcx: 'a> { universe: Cell, } -/// A map returned by `replace_late_bound_regions_with_placeholders()` +/// A map returned by `replace_bound_vars_with_placeholders()` /// indicating the placeholder region that each late-bound region was /// replaced with. 
pub type PlaceholderMap<'tcx> = BTreeMap>; @@ -411,7 +411,7 @@ pub enum NLLRegionVariableOrigin { /// "Universal" instantiation of a higher-ranked region (e.g., /// from a `for<'a> T` binder). Meant to represent "any region". - Placeholder(ty::Placeholder), + Placeholder(ty::PlaceholderRegion), Existential, } @@ -790,7 +790,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { self.projection_cache .borrow_mut() - .commit(&projection_cache_snapshot); + .commit(projection_cache_snapshot); self.type_variables.borrow_mut().commit(type_snapshot); self.int_unification_table.borrow_mut().commit(int_snapshot); self.float_unification_table @@ -935,7 +935,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { b, }, placeholder_map, - ) = self.replace_late_bound_regions_with_placeholders(predicate); + ) = self.replace_bound_vars_with_placeholders(predicate); let cause_span = cause.span; let ok = self.at(cause, param_env).sub_exp(a_is_expected, a, b)?; @@ -952,7 +952,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { ) -> UnitResult<'tcx> { self.commit_if_ok(|snapshot| { let (ty::OutlivesPredicate(r_a, r_b), placeholder_map) = - self.replace_late_bound_regions_with_placeholders(predicate); + self.replace_bound_vars_with_placeholders(predicate); let origin = SubregionOrigin::from_obligation_cause(cause, || { RelateRegionParamBound(cause.span) }); @@ -972,6 +972,17 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { self.tcx.mk_var(self.next_ty_var_id(false, origin)) } + pub fn next_ty_var_in_universe( + &self, + origin: TypeVariableOrigin, + universe: ty::UniverseIndex + ) -> Ty<'tcx> { + let vid = self.type_variables + .borrow_mut() + .new_var(universe, false, origin); + self.tcx.mk_var(vid) + } + pub fn next_diverging_ty_var(&self, origin: TypeVariableOrigin) -> Ty<'tcx> { self.tcx.mk_var(self.next_ty_var_id(true, origin)) } @@ -1160,10 +1171,10 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { } /// Takes ownership of the list of variable regions. This implies - /// that all the region constriants have already been taken, and + /// that all the region constraints have already been taken, and /// hence that `resolve_regions_and_report_errors` can never be /// called. This is used only during NLL processing to "hand off" ownership - /// of the set of region vairables into the NLL region context. + /// of the set of region variables into the NLL region context. pub fn take_region_var_origins(&self) -> VarInfos { let (var_infos, data) = self.region_constraints .borrow_mut() @@ -1227,6 +1238,17 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { } } + /// If `TyVar(vid)` resolves to a type, return that type. Else, return the + /// universe index of `TyVar(vid)`. 
+ pub fn probe_ty_var(&self, vid: TyVid) -> Result, ty::UniverseIndex> { + use self::type_variable::TypeVariableValue; + + match self.type_variables.borrow_mut().probe(vid) { + TypeVariableValue::Known { value } => Ok(value), + TypeVariableValue::Unknown { universe } => Err(universe), + } + } + pub fn shallow_resolve(&self, typ: Ty<'tcx>) -> Ty<'tcx> { self.inlined_shallow_resolve(typ) } @@ -1328,18 +1350,18 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { self.report_and_explain_type_error(trace, &err) } - pub fn replace_late_bound_regions_with_fresh_var( + pub fn replace_bound_vars_with_fresh_vars( &self, span: Span, lbrct: LateBoundRegionConversionTime, - value: &ty::Binder, + value: &ty::Binder ) -> (T, BTreeMap>) where - T: TypeFoldable<'tcx>, + T: TypeFoldable<'tcx> { - self.tcx.replace_late_bound_regions(value, |br| { - self.next_region_var(LateBoundRegion(span, br, lbrct)) - }) + let fld_r = |br| self.next_region_var(LateBoundRegion(span, br, lbrct)); + let fld_t = |_| self.next_ty_var(TypeVariableOrigin::MiscVariable(span)); + self.tcx.replace_bound_vars(value, fld_r, fld_t) } /// Given a higher-ranked projection predicate like: @@ -1478,7 +1500,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { } /// Clears the selection, evaluation, and projection caches. This is useful when - /// repeatedly attemping to select an Obligation while changing only + /// repeatedly attempting to select an Obligation while changing only /// its ParamEnv, since FulfillmentContext doesn't use 'probe' pub fn clear_caches(&self) { self.selection_cache.clear(); diff --git a/src/librustc/infer/nll_relate/mod.rs b/src/librustc/infer/nll_relate/mod.rs index e003c1989e..972ba16f7e 100644 --- a/src/librustc/infer/nll_relate/mod.rs +++ b/src/librustc/infer/nll_relate/mod.rs @@ -11,30 +11,41 @@ //! This code is kind of an alternate way of doing subtyping, //! supertyping, and type equating, distinct from the `combine.rs` //! code but very similar in its effect and design. Eventually the two -//! ought to be merged. This code is intended for use in NLL. +//! ought to be merged. This code is intended for use in NLL and chalk. //! //! Here are the key differences: //! -//! - This code generally assumes that there are no unbound type -//! inferences variables, because at NLL -//! time types are fully inferred up-to regions. -//! - Actually, to support user-given type annotations like -//! `Vec<_>`, we do have some measure of support for type -//! inference variables, but we impose some simplifying -//! assumptions on them that would not be suitable for the infer -//! code more generally. This could be fixed. +//! - This code may choose to bypass some checks (e.g. the occurs check) +//! in the case where we know that there are no unbound type inference +//! variables. This is the case for NLL, because at NLL time types are fully +//! inferred up-to regions. //! - This code uses "universes" to handle higher-ranked regions and //! not the leak-check. This is "more correct" than what rustc does //! and we are generally migrating in this direction, but NLL had to //! get there first. +//! +//! Also, this code assumes that there are no bound types at all, not even +//! free ones. This is ok because: +//! - we are not relating anything quantified over some type variable +//! - we will have instantiated all the bound type vars already (the one +//! thing we relate in chalk are basically domain goals and their +//! 
constituents) use crate::infer::InferCtxt; use crate::ty::fold::{TypeFoldable, TypeVisitor}; use crate::ty::relate::{self, Relate, RelateResult, TypeRelation}; use crate::ty::subst::Kind; use crate::ty::{self, Ty, TyCtxt}; +use crate::ty::error::TypeError; +use crate::traits::DomainGoal; use rustc_data_structures::fx::FxHashMap; +#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] +pub enum NormalizationStrategy { + Lazy, + Eager, +} + pub struct TypeRelating<'me, 'gcx: 'tcx, 'tcx: 'me, D> where D: TypeRelatingDelegate<'tcx>, @@ -75,6 +86,10 @@ pub trait TypeRelatingDelegate<'tcx> { /// delegate. fn push_outlives(&mut self, sup: ty::Region<'tcx>, sub: ty::Region<'tcx>); + /// Push a domain goal that will need to be proved for the two types to + /// be related. Used for lazy normalization. + fn push_domain_goal(&mut self, domain_goal: DomainGoal<'tcx>); + /// Creates a new universe index. Used when instantiating placeholders. fn create_next_universe(&mut self) -> ty::UniverseIndex; @@ -95,7 +110,7 @@ pub trait TypeRelatingDelegate<'tcx> { /// So e.g. if you have `for<'a> fn(..) <: for<'b> fn(..)`, then /// we will invoke this method to instantiate `'b` with a /// placeholder region. - fn next_placeholder_region(&mut self, placeholder: ty::Placeholder) -> ty::Region<'tcx>; + fn next_placeholder_region(&mut self, placeholder: ty::PlaceholderRegion) -> ty::Region<'tcx>; /// Creates a new existential region in the given universe. This /// is used when handling subtyping and type variables -- if we @@ -105,6 +120,13 @@ pub trait TypeRelatingDelegate<'tcx> { /// relate `Foo<'?0>` with `Foo<'a>` (and probably add an outlives /// relation stating that `'?0: 'a`). fn generalize_existential(&mut self, universe: ty::UniverseIndex) -> ty::Region<'tcx>; + + /// Define the normalization strategy to use, eager or lazy. + fn normalization() -> NormalizationStrategy; + + /// Enable some optimizations if we do not expect inference variables + /// in the RHS of the relation. + fn forbid_inference_vars() -> bool; } #[derive(Clone, Debug)] @@ -176,7 +198,7 @@ where universe }); - let placeholder = ty::Placeholder { universe, name: br }; + let placeholder = ty::PlaceholderRegion { universe, name: br }; delegate.next_placeholder_region(placeholder) } else { delegate.next_existential_region_var() @@ -242,15 +264,79 @@ where self.delegate.push_outlives(sup, sub); } - /// When we encounter a canonical variable `var` in the output, - /// equate it with `kind`. If the variable has been previously - /// equated, then equate it again. - fn relate_var(&mut self, var_ty: Ty<'tcx>, value_ty: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> { - debug!("equate_var(var_ty={:?}, value_ty={:?})", var_ty, value_ty); + /// Relate a projection type and some value type lazily. This will always + /// succeed, but we push an additional `ProjectionEq` goal depending + /// on the value type: + /// - if the value type is any type `T` which is not a projection, we push + /// `ProjectionEq(projection = T)`. + /// - if the value type is another projection `other_projection`, we create + /// a new inference variable `?U` and push the two goals + /// `ProjectionEq(projection = ?U)`, `ProjectionEq(other_projection = ?U)`. 
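The doc comment above describes the lazy-normalization strategy: relating a projection never fails on the spot, it only records `ProjectionEq` goals, and a fresh inference variable bridges the case where both sides are projections. A toy, self-contained sketch of that dispatch (illustrative names only, not rustc's real types):

```
// Illustrative sketch only -- not rustc's real types.
#[derive(Clone)]
enum Ty {
    Projection(String), // e.g. "<T as Iterator>::Item"
    Concrete(String),   // e.g. "u32"
    Var(usize),         // inference variable ?N
}

struct ProjectionEq {
    projection: String,
    ty: Ty,
}

struct Relation {
    goals: Vec<ProjectionEq>,
    next_var: usize,
}

impl Relation {
    fn fresh_var(&mut self) -> Ty {
        let var = Ty::Var(self.next_var);
        self.next_var += 1;
        var
    }

    fn relate_projection_ty(&mut self, projection: String, value_ty: Ty) -> Ty {
        match value_ty {
            // Both sides are projections: introduce `?U` and push two goals.
            Ty::Projection(other) => {
                let var = self.fresh_var();
                self.relate_projection_ty(projection, var.clone());
                self.relate_projection_ty(other, var.clone());
                var
            }
            // Any other type: record `ProjectionEq(projection = value_ty)`.
            _ => {
                self.goals.push(ProjectionEq { projection, ty: value_ty.clone() });
                value_ty
            }
        }
    }
}

fn main() {
    let mut rel = Relation { goals: vec![], next_var: 0 };

    // `<T as Iterator>::Item` vs. `u32`: one goal.
    rel.relate_projection_ty("<T as Iterator>::Item".to_string(), Ty::Concrete("u32".into()));

    // Two projections: bridged through the fresh variable `?0`, two goals.
    rel.relate_projection_ty(
        "<T as Iterator>::Item".to_string(),
        Ty::Projection("<U as IntoIterator>::Item".to_string()),
    );

    assert_eq!(rel.goals.len(), 3);
    for goal in &rel.goals {
        let rhs = match &goal.ty {
            Ty::Projection(p) => p.clone(),
            Ty::Concrete(c) => c.clone(),
            Ty::Var(v) => format!("?{}", v),
        };
        println!("ProjectionEq({} = {})", goal.projection, rhs);
    }
}
```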
+ fn relate_projection_ty( + &mut self, + projection_ty: ty::ProjectionTy<'tcx>, + value_ty: ty::Ty<'tcx> + ) -> Ty<'tcx> { + use crate::infer::type_variable::TypeVariableOrigin; + use crate::traits::WhereClause; + use syntax_pos::DUMMY_SP; - let generalized_ty = self.generalize_value(value_ty); - self.infcx - .force_instantiate_unchecked(var_ty, generalized_ty); + match value_ty.sty { + ty::Projection(other_projection_ty) => { + let var = self.infcx.next_ty_var(TypeVariableOrigin::MiscVariable(DUMMY_SP)); + self.relate_projection_ty(projection_ty, var); + self.relate_projection_ty(other_projection_ty, var); + var + } + + _ => { + let projection = ty::ProjectionPredicate { + projection_ty, + ty: value_ty, + }; + self.delegate.push_domain_goal( + DomainGoal::Holds(WhereClause::ProjectionEq(projection)) + ); + value_ty + } + } + } + + /// Relate a type inference variable with a value type. + fn relate_ty_var( + &mut self, + vid: ty::TyVid, + value_ty: Ty<'tcx> + ) -> RelateResult<'tcx, Ty<'tcx>> { + debug!("relate_ty_var(vid={:?}, value_ty={:?})", vid, value_ty); + + match value_ty.sty { + ty::Infer(ty::TyVar(value_vid)) => { + // Two type variables: just equate them. + self.infcx.type_variables.borrow_mut().equate(vid, value_vid); + return Ok(value_ty); + } + + ty::Projection(projection_ty) + if D::normalization() == NormalizationStrategy::Lazy => + { + return Ok(self.relate_projection_ty(projection_ty, self.infcx.tcx.mk_var(vid))); + } + + _ => (), + } + + let generalized_ty = self.generalize_value(value_ty, vid)?; + debug!("relate_ty_var: generalized_ty = {:?}", generalized_ty); + + if D::forbid_inference_vars() { + // In NLL, we don't have type inference variables + // floating around, so we can do this rather imprecise + // variant of the occurs-check. + assert!(!generalized_ty.has_infer_types()); + } + + self.infcx.type_variables.borrow_mut().instantiate(vid, generalized_ty); // The generalized values we extract from `canonical_var_values` have // been fully instantiated and hence the set of scopes we have @@ -264,22 +350,27 @@ where // Restore the old scopes now. self.a_scopes = old_a_scopes; - debug!("equate_var: complete, result = {:?}", result); + debug!("relate_ty_var: complete, result = {:?}", result); result } - fn generalize_value>(&mut self, value: T) -> T { - TypeGeneralizer { - tcx: self.infcx.tcx, + fn generalize_value>( + &mut self, + value: T, + for_vid: ty::TyVid + ) -> RelateResult<'tcx, T> { + let universe = self.infcx.probe_ty_var(for_vid).unwrap_err(); + + let mut generalizer = TypeGeneralizer { + infcx: self.infcx, delegate: &mut self.delegate, first_free_index: ty::INNERMOST, ambient_variance: self.ambient_variance, + for_vid_sub_root: self.infcx.type_variables.borrow_mut().sub_root_var(for_vid), + universe, + }; - // These always correspond to an `_` or `'_` written by - // user, and those are always in the root universe. 
- universe: ty::UniverseIndex::ROOT, - }.relate(&value, &value) - .unwrap() + generalizer.relate(&value, &value) } } @@ -327,11 +418,35 @@ where Ok(r) } - fn tys(&mut self, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> { + fn tys(&mut self, a: Ty<'tcx>, mut b: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> { let a = self.infcx.shallow_resolve(a); - match a.sty { - ty::Infer(ty::TyVar(_)) | ty::Infer(ty::IntVar(_)) | ty::Infer(ty::FloatVar(_)) => { - self.relate_var(a.into(), b.into()) + + if !D::forbid_inference_vars() { + b = self.infcx.shallow_resolve(b); + } + + match (&a.sty, &b.sty) { + (_, &ty::Infer(ty::TyVar(vid))) => { + if D::forbid_inference_vars() { + // Forbid inference variables in the RHS. + bug!("unexpected inference var {:?}", b) + } else { + self.relate_ty_var(vid, a) + } + } + + (&ty::Infer(ty::TyVar(vid)), _) => self.relate_ty_var(vid, b), + + (&ty::Projection(projection_ty), _) + if D::normalization() == NormalizationStrategy::Lazy => + { + Ok(self.relate_projection_ty(projection_ty, b)) + } + + (_, &ty::Projection(projection_ty)) + if D::normalization() == NormalizationStrategy::Lazy => + { + Ok(self.relate_projection_ty(projection_ty, a)) } _ => { @@ -340,7 +455,8 @@ where a, b, self.ambient_variance ); - relate::super_relate_tys(self, a, b) + // Will also handle unification of `IntVar` and `FloatVar`. + self.infcx.super_combine_tys(self, a, b) } } } @@ -551,7 +667,7 @@ struct TypeGeneralizer<'me, 'gcx: 'tcx, 'tcx: 'me, D> where D: TypeRelatingDelegate<'tcx> + 'me, { - tcx: TyCtxt<'me, 'gcx, 'tcx>, + infcx: &'me InferCtxt<'me, 'gcx, 'tcx>, delegate: &'me mut D, @@ -561,6 +677,14 @@ where first_free_index: ty::DebruijnIndex, + /// The vid of the type variable that is in the process of being + /// instantiated. If we find this within the value we are folding, + /// that means we would have created a cyclic value. + for_vid_sub_root: ty::TyVid, + + /// The universe of the type variable that is in the process of being + /// instantiated. If we find anything that this universe cannot name, + /// we reject the relation. universe: ty::UniverseIndex, } @@ -569,7 +693,7 @@ where D: TypeRelatingDelegate<'tcx>, { fn tcx(&self) -> TyCtxt<'me, 'gcx, 'tcx> { - self.tcx + self.infcx.tcx } fn tag(&self) -> &'static str { @@ -609,17 +733,84 @@ where } fn tys(&mut self, a: Ty<'tcx>, _: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> { + use crate::infer::type_variable::TypeVariableValue; + debug!("TypeGeneralizer::tys(a={:?})", a,); match a.sty { - ty::Infer(ty::TyVar(_)) | ty::Infer(ty::IntVar(_)) | ty::Infer(ty::FloatVar(_)) => { + ty::Infer(ty::TyVar(_)) | ty::Infer(ty::IntVar(_)) | ty::Infer(ty::FloatVar(_)) + if D::forbid_inference_vars() => + { bug!( "unexpected inference variable encountered in NLL generalization: {:?}", a ); } - _ => relate::super_relate_tys(self, a, a), + ty::Infer(ty::TyVar(vid)) => { + let mut variables = self.infcx.type_variables.borrow_mut(); + let vid = variables.root_var(vid); + let sub_vid = variables.sub_root_var(vid); + if sub_vid == self.for_vid_sub_root { + // If sub-roots are equal, then `for_vid` and + // `vid` are related via subtyping. + debug!("TypeGeneralizer::tys: occurs check failed"); + return Err(TypeError::Mismatch); + } else { + match variables.probe(vid) { + TypeVariableValue::Known { value: u } => { + drop(variables); + self.relate(&u, &u) + } + TypeVariableValue::Unknown { universe: _universe } => { + if self.ambient_variance == ty::Bivariant { + // FIXME: we may need a WF predicate (related to #54105). 
+ } + + let origin = *variables.var_origin(vid); + + // Replacing with a new variable in the universe `self.universe`, + // it will be unified later with the original type variable in + // the universe `_universe`. + let new_var_id = variables.new_var(self.universe, false, origin); + + let u = self.tcx().mk_var(new_var_id); + debug!( + "generalize: replacing original vid={:?} with new={:?}", + vid, + u + ); + return Ok(u); + } + } + } + } + + ty::Infer(ty::IntVar(_)) | + ty::Infer(ty::FloatVar(_)) => { + // No matter what mode we are in, + // integer/floating-point types must be equal to be + // relatable. + Ok(a) + } + + ty::Placeholder(placeholder) => { + if self.universe.cannot_name(placeholder.universe) { + debug!( + "TypeGeneralizer::tys: root universe {:?} cannot name\ + placeholder in universe {:?}", + self.universe, + placeholder.universe + ); + Err(TypeError::Mismatch) + } else { + Ok(a) + } + } + + _ => { + relate::super_relate_tys(self, a, a) + } } } @@ -673,64 +864,3 @@ where Ok(ty::Binder::bind(result)) } } - -impl InferCtxt<'_, '_, 'tcx> { - /// A hacky sort of method used by the NLL type-relating code: - /// - /// - `var` must be some unbound type variable. - /// - `value` must be a suitable type to use as its value. - /// - /// `var` will then be equated with `value`. Note that this - /// sidesteps a number of important checks, such as the "occurs - /// check" that prevents cyclic types, so it is important not to - /// use this method during regular type-check. - fn force_instantiate_unchecked(&self, var: Ty<'tcx>, value: Ty<'tcx>) { - match (&var.sty, &value.sty) { - (&ty::Infer(ty::TyVar(vid)), _) => { - let mut type_variables = self.type_variables.borrow_mut(); - - // In NLL, we don't have type inference variables - // floating around, so we can do this rather imprecise - // variant of the occurs-check. 
- assert!(!value.has_infer_types()); - - type_variables.instantiate(vid, value); - } - - (&ty::Infer(ty::IntVar(vid)), &ty::Int(value)) => { - let mut int_unification_table = self.int_unification_table.borrow_mut(); - int_unification_table - .unify_var_value(vid, Some(ty::IntVarValue::IntType(value))) - .unwrap_or_else(|_| { - bug!("failed to unify int var `{:?}` with `{:?}`", vid, value); - }); - } - - (&ty::Infer(ty::IntVar(vid)), &ty::Uint(value)) => { - let mut int_unification_table = self.int_unification_table.borrow_mut(); - int_unification_table - .unify_var_value(vid, Some(ty::IntVarValue::UintType(value))) - .unwrap_or_else(|_| { - bug!("failed to unify int var `{:?}` with `{:?}`", vid, value); - }); - } - - (&ty::Infer(ty::FloatVar(vid)), &ty::Float(value)) => { - let mut float_unification_table = self.float_unification_table.borrow_mut(); - float_unification_table - .unify_var_value(vid, Some(ty::FloatVarValue(value))) - .unwrap_or_else(|_| { - bug!("failed to unify float var `{:?}` with `{:?}`", vid, value) - }); - } - - _ => { - bug!( - "force_instantiate_unchecked invoked with bad combination: var={:?} value={:?}", - var, - value, - ); - } - } - } -} diff --git a/src/librustc/infer/opaque_types/mod.rs b/src/librustc/infer/opaque_types/mod.rs index 4985897241..fda9817cc8 100644 --- a/src/librustc/infer/opaque_types/mod.rs +++ b/src/librustc/infer/opaque_types/mod.rs @@ -121,7 +121,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { parent_def_id, body_id, param_env, - opaque_types: DefIdMap(), + opaque_types: Default::default(), obligations: vec![], }; let value = instantiator.instantiate_opaque_types_in_map(value); @@ -366,7 +366,8 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { let mut types = vec![concrete_ty]; let bound_region = |r| self.sub_regions(infer::CallReturn(span), least_region, r); while let Some(ty) = types.pop() { - let mut components = self.tcx.outlives_components(ty); + let mut components = smallvec![]; + self.tcx.push_outlives_components(ty, &mut components); while let Some(component) = components.pop() { match component { Component::Region(r) => { diff --git a/src/librustc/infer/outlives/obligations.rs b/src/librustc/infer/outlives/obligations.rs index 0215b0380b..502a5828f3 100644 --- a/src/librustc/infer/outlives/obligations.rs +++ b/src/librustc/infer/outlives/obligations.rs @@ -9,7 +9,7 @@ // except according to those terms. //! Code that handles "type-outlives" constraints like `T: 'a`. This -//! is based on the `outlives_components` function defined on the tcx, +//! is based on the `push_outlives_components` function defined on the tcx, //! but it adds a bit of heuristics on top, in particular to deal with //! associated types and projections. //! @@ -132,7 +132,7 @@ impl<'cx, 'gcx, 'tcx> InferCtxt<'cx, 'gcx, 'tcx> { /// /// See the `region_obligations` field of `InferCtxt` for some /// comments about how this function fits into the overall expected - /// flow of the the inferencer. The key point is that it is + /// flow of the inferencer. The key point is that it is /// invoked after all type-inference variables have been bound -- /// towards the end of regionck. 
This also ensures that the /// region-bound-pairs are available (see comments above regarding @@ -305,19 +305,20 @@ where ty, region, origin ); - assert!(!ty.has_escaping_regions()); + assert!(!ty.has_escaping_bound_vars()); - let components = self.tcx.outlives_components(ty); - self.components_must_outlive(origin, components, region); + let mut components = smallvec![]; + self.tcx.push_outlives_components(ty, &mut components); + self.components_must_outlive(origin, &components, region); } fn components_must_outlive( &mut self, origin: infer::SubregionOrigin<'tcx>, - components: Vec>, + components: &[Component<'tcx>], region: ty::Region<'tcx>, ) { - for component in components { + for component in components.iter() { let origin = origin.clone(); match component { Component::Region(region1) => { @@ -325,13 +326,13 @@ where .push_sub_region_constraint(origin, region, region1); } Component::Param(param_ty) => { - self.param_ty_must_outlive(origin, region, param_ty); + self.param_ty_must_outlive(origin, region, *param_ty); } Component::Projection(projection_ty) => { - self.projection_must_outlive(origin, region, projection_ty); + self.projection_must_outlive(origin, region, *projection_ty); } Component::EscapingProjection(subcomponents) => { - self.components_must_outlive(origin, subcomponents, region); + self.components_must_outlive(origin, &subcomponents, region); } Component::UnresolvedInferenceVariable(v) => { // ignore this, we presume it will yield an error diff --git a/src/librustc/infer/outlives/verify.rs b/src/librustc/infer/outlives/verify.rs index e1db295b7e..5fee04341b 100644 --- a/src/librustc/infer/outlives/verify.rs +++ b/src/librustc/infer/outlives/verify.rs @@ -155,7 +155,8 @@ impl<'cx, 'gcx, 'tcx> VerifyBoundCx<'cx, 'gcx, 'tcx> { .map(|subty| self.type_bound(subty)) .collect::>(); - let mut regions = ty.regions(); + let mut regions = smallvec![]; + ty.push_regions(&mut regions); regions.retain(|r| !r.is_late_bound()); // ignore late-bound regions bounds.push(VerifyBound::AllBounds( regions @@ -298,8 +299,8 @@ impl<'cx, 'gcx, 'tcx> VerifyBoundCx<'cx, 'gcx, 'tcx> { let assoc_item = tcx.associated_item(assoc_item_def_id); let trait_def_id = assoc_item.container.assert_trait(); let trait_predicates = tcx.predicates_of(trait_def_id).predicates - .into_iter() - .map(|(p, _)| p) + .iter() + .map(|(p, _)| *p) .collect(); let identity_substs = Substs::identity_for_item(tcx, assoc_item_def_id); let identity_proj = tcx.mk_projection(assoc_item_def_id, identity_substs); @@ -323,7 +324,7 @@ impl<'cx, 'gcx, 'tcx> VerifyBoundCx<'cx, 'gcx, 'tcx> { predicates .into_iter() .filter_map(|p| p.as_ref().to_opt_type_outlives()) - .filter_map(|p| p.no_late_bound_regions()) + .filter_map(|p| p.no_bound_vars()) .filter(move |p| compare_ty(p.0)) } } diff --git a/src/librustc/infer/region_constraints/README.md b/src/librustc/infer/region_constraints/README.md index 61603e6dee..775bbf955b 100644 --- a/src/librustc/infer/region_constraints/README.md +++ b/src/librustc/infer/region_constraints/README.md @@ -3,7 +3,7 @@ > WARNING: This README is obsolete and will be removed soon! For > more info on how the current borrowck works, see the [rustc guide]. -[rustc guide]: https://rust-lang-nursery.github.io/rustc-guide/mir/borrowck.html +[rustc guide]: https://rust-lang.github.io/rustc-guide/mir/borrowck.html ## Terminology @@ -18,7 +18,7 @@ constraints over the course of a function. Finally, at the end of processing a function, we process and solve the constraints all at once. 
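As the README text above says, constraints are only gathered while a function is checked and are solved together at the end. A deliberately tiny sketch of that flow, reusing the two constraint names that appear elsewhere in this patch (`VarSubVar`, `RegSubReg`) but inventing the rest:

```
// Invented scaffolding around the two constraint names visible in this patch.
enum Constraint {
    VarSubVar(usize, usize),               // ?R1 <= ?R2
    RegSubReg(&'static str, &'static str), // 'a <= 'b between concrete regions
}

#[derive(Default)]
struct RegionConstraintCollector {
    constraints: Vec<Constraint>,
}

impl RegionConstraintCollector {
    // While the function body is being checked we only record constraints...
    fn push(&mut self, c: Constraint) {
        self.constraints.push(c);
    }

    // ...and only after the whole function has been visited do we solve them.
    fn solve(self) {
        for c in &self.constraints {
            match c {
                Constraint::VarSubVar(a, b) => println!("solve ?R{} <= ?R{}", a, b),
                Constraint::RegSubReg(r1, r2) => println!("solve {} <= {}", r1, r2),
            }
        }
    }
}

fn main() {
    let mut collector = RegionConstraintCollector::default();
    collector.push(Constraint::VarSubVar(0, 1));
    collector.push(Constraint::RegSubReg("'a", "'static"));
    collector.solve();
}
```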
-[ti]: https://rust-lang-nursery.github.io/rustc-guide/type-inference.html +[ti]: https://rust-lang.github.io/rustc-guide/type-inference.html The constraints are always of one of three possible forms: diff --git a/src/librustc/infer/region_constraints/mod.rs b/src/librustc/infer/region_constraints/mod.rs index c82603bf56..af1b6964b8 100644 --- a/src/librustc/infer/region_constraints/mod.rs +++ b/src/librustc/infer/region_constraints/mod.rs @@ -11,7 +11,7 @@ //! See README.md use self::CombineMapType::*; -use self::UndoLogEntry::*; +use self::UndoLog::*; use super::unify_key; use super::{MiscVariable, RegionVariableOrigin, SubregionOrigin}; @@ -29,6 +29,7 @@ use std::{cmp, fmt, mem, u32}; mod taint; +#[derive(Default)] pub struct RegionConstraintCollector<'tcx> { /// For each `RegionVid`, the corresponding `RegionVariableOrigin`. var_infos: IndexVec, @@ -51,14 +52,17 @@ pub struct RegionConstraintCollector<'tcx> { /// The undo log records actions that might later be undone. /// - /// Note: when the undo_log is empty, we are not actively + /// Note: `num_open_snapshots` is used to track if we are actively /// snapshotting. When the `start_snapshot()` method is called, we - /// push an OpenSnapshot entry onto the list to indicate that we - /// are now actively snapshotting. The reason for this is that - /// otherwise we end up adding entries for things like the lower - /// bound on a variable and so forth, which can never be rolled - /// back. - undo_log: Vec>, + /// increment `num_open_snapshots` to indicate that we are now actively + /// snapshotting. The reason for this is that otherwise we end up adding + /// entries for things like the lower bound on a variable and so forth, + /// which can never be rolled back. + undo_log: Vec>, + + /// The number of open snapshots, i.e. those that haven't been committed or + /// rolled back. + num_open_snapshots: usize, /// When we add a R1 == R2 constriant, we currently add (a) edges /// R1 <= R2 and R2 <= R1 and (b) we unify the two regions in this @@ -253,15 +257,7 @@ struct TwoRegions<'tcx> { } #[derive(Copy, Clone, PartialEq)] -enum UndoLogEntry<'tcx> { - /// Pushed when we start a snapshot. - OpenSnapshot, - - /// Replaces an `OpenSnapshot` when a snapshot is committed, but - /// that snapshot is not the root. If the root snapshot is - /// unrolled, all nested snapshots must be committed. 
- CommitedSnapshot, - +enum UndoLog<'tcx> { /// We added `RegionVid` AddVar(RegionVid), @@ -341,17 +337,8 @@ impl TaintDirections { } impl<'tcx> RegionConstraintCollector<'tcx> { - pub fn new() -> RegionConstraintCollector<'tcx> { - RegionConstraintCollector { - var_infos: VarInfos::default(), - data: RegionConstraintData::default(), - lubs: Default::default(), - glbs: Default::default(), - bound_count: 0, - undo_log: Vec::new(), - unification_table: ut::UnificationTable::new(), - any_unifications: false, - } + pub fn new() -> Self { + Self::default() } pub fn num_region_vars(&self) -> usize { @@ -395,6 +382,7 @@ impl<'tcx> RegionConstraintCollector<'tcx> { glbs, bound_count: _, undo_log: _, + num_open_snapshots: _, unification_table, any_unifications, } = self; @@ -423,13 +411,13 @@ impl<'tcx> RegionConstraintCollector<'tcx> { } fn in_snapshot(&self) -> bool { - !self.undo_log.is_empty() + self.num_open_snapshots > 0 } pub fn start_snapshot(&mut self) -> RegionSnapshot { let length = self.undo_log.len(); debug!("RegionConstraintCollector: start_snapshot({})", length); - self.undo_log.push(OpenSnapshot); + self.num_open_snapshots += 1; RegionSnapshot { length, region_snapshot: self.unification_table.snapshot(), @@ -437,39 +425,46 @@ impl<'tcx> RegionConstraintCollector<'tcx> { } } + fn assert_open_snapshot(&self, snapshot: &RegionSnapshot) { + assert!(self.undo_log.len() >= snapshot.length); + assert!(self.num_open_snapshots > 0); + } + pub fn commit(&mut self, snapshot: RegionSnapshot) { debug!("RegionConstraintCollector: commit({})", snapshot.length); - assert!(self.undo_log.len() > snapshot.length); - assert!(self.undo_log[snapshot.length] == OpenSnapshot); + self.assert_open_snapshot(&snapshot); - if snapshot.length == 0 { + if self.num_open_snapshots == 1 { + // The root snapshot. It's safe to clear the undo log because + // there's no snapshot further out that we might need to roll back + // to. + assert!(snapshot.length == 0); self.undo_log.clear(); - } else { - (*self.undo_log)[snapshot.length] = CommitedSnapshot; } + + self.num_open_snapshots -= 1; + self.unification_table.commit(snapshot.region_snapshot); } pub fn rollback_to(&mut self, snapshot: RegionSnapshot) { debug!("RegionConstraintCollector: rollback_to({:?})", snapshot); - assert!(self.undo_log.len() > snapshot.length); - assert!(self.undo_log[snapshot.length] == OpenSnapshot); - while self.undo_log.len() > snapshot.length + 1 { + self.assert_open_snapshot(&snapshot); + + while self.undo_log.len() > snapshot.length { let undo_entry = self.undo_log.pop().unwrap(); self.rollback_undo_entry(undo_entry); } - let c = self.undo_log.pop().unwrap(); - assert!(c == OpenSnapshot); + + self.num_open_snapshots -= 1; + self.unification_table.rollback_to(snapshot.region_snapshot); self.any_unifications = snapshot.any_unifications; } - fn rollback_undo_entry(&mut self, undo_entry: UndoLogEntry<'tcx>) { + fn rollback_undo_entry(&mut self, undo_entry: UndoLog<'tcx>) { match undo_entry { - OpenSnapshot => { - panic!("Failure to observe stack discipline"); - } - Purged | CommitedSnapshot => { + Purged => { // nothing to do here } AddVar(vid) => { @@ -529,15 +524,10 @@ impl<'tcx> RegionConstraintCollector<'tcx> { /// in `skols`. This is used after a higher-ranked operation /// completes to remove all trace of the placeholder regions /// created in that time. 
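The snapshot bookkeeping introduced above replaces the old `OpenSnapshot`/`CommitedSnapshot` markers in the undo log with a plain `num_open_snapshots` counter; the log is cleared wholesale only when the root snapshot commits. A simplified, runnable sketch of that discipline (illustrative types, not the real `RegionConstraintCollector`):

```
// Illustrative types only -- not the real RegionConstraintCollector.
struct Snapshot {
    length: usize,
}

enum UndoLog {
    AddVar(u32),
}

#[derive(Default)]
struct Collector {
    undo_log: Vec<UndoLog>,
    num_open_snapshots: usize,
}

impl Collector {
    fn start_snapshot(&mut self) -> Snapshot {
        // No OpenSnapshot marker is pushed; we just count open snapshots.
        self.num_open_snapshots += 1;
        Snapshot { length: self.undo_log.len() }
    }

    fn commit(&mut self, snapshot: Snapshot) {
        assert!(self.num_open_snapshots > 0);
        if self.num_open_snapshots == 1 {
            // Root snapshot: nothing further out could roll us back,
            // so the undo log can be cleared wholesale.
            assert_eq!(snapshot.length, 0);
            self.undo_log.clear();
        }
        self.num_open_snapshots -= 1;
    }

    fn rollback_to(&mut self, snapshot: Snapshot) {
        while self.undo_log.len() > snapshot.length {
            match self.undo_log.pop().unwrap() {
                // A real implementation would apply the inverse action here.
                UndoLog::AddVar(v) => println!("rolled back AddVar({})", v),
            }
        }
        self.num_open_snapshots -= 1;
    }
}

fn main() {
    let mut c = Collector::default();
    let outer = c.start_snapshot();
    c.undo_log.push(UndoLog::AddVar(0));
    let inner = c.start_snapshot();
    c.undo_log.push(UndoLog::AddVar(1));

    c.rollback_to(inner); // undoes AddVar(1) only
    assert_eq!(c.undo_log.len(), 1);

    c.commit(outer); // root commit clears the whole log
    assert!(c.undo_log.is_empty());
}
```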
- pub fn pop_placeholders( - &mut self, - placeholders: &FxHashSet>, - snapshot: &RegionSnapshot, - ) { + pub fn pop_placeholders(&mut self, placeholders: &FxHashSet>) { debug!("pop_placeholders(placeholders={:?})", placeholders); assert!(self.in_snapshot()); - assert!(self.undo_log[snapshot.length] == OpenSnapshot); let constraints_to_kill: Vec = self.undo_log .iter() @@ -556,7 +546,7 @@ impl<'tcx> RegionConstraintCollector<'tcx> { fn kill_constraint<'tcx>( placeholders: &FxHashSet>, - undo_entry: &UndoLogEntry<'tcx>, + undo_entry: &UndoLog<'tcx>, ) -> bool { match undo_entry { &AddConstraint(Constraint::VarSubVar(..)) => false, @@ -570,7 +560,7 @@ impl<'tcx> RegionConstraintCollector<'tcx> { &AddCombination(_, ref two_regions) => { placeholders.contains(&two_regions.a) || placeholders.contains(&two_regions.b) } - &AddVar(..) | &OpenSnapshot | &Purged | &CommitedSnapshot => false, + &AddVar(..) | &Purged => false, } } } @@ -833,10 +823,6 @@ impl<'tcx> RegionConstraintCollector<'tcx> { ty::RePlaceholder(placeholder) => placeholder.universe, ty::ReClosureBound(vid) | ty::ReVar(vid) => self.var_universe(vid), ty::ReLateBound(..) => bug!("universe(): encountered bound region {:?}", region), - ty::ReCanonical(..) => bug!( - "region_universe(): encountered canonical region {:?}", - region - ), } } diff --git a/src/librustc/infer/region_constraints/taint.rs b/src/librustc/infer/region_constraints/taint.rs index ef7365276f..27ce7f1060 100644 --- a/src/librustc/infer/region_constraints/taint.rs +++ b/src/librustc/infer/region_constraints/taint.rs @@ -29,7 +29,7 @@ impl<'tcx> TaintSet<'tcx> { pub(super) fn fixed_point( &mut self, tcx: TyCtxt<'_, '_, 'tcx>, - undo_log: &[UndoLogEntry<'tcx>], + undo_log: &[UndoLog<'tcx>], verifys: &[Verify<'tcx>], ) { let mut prev_len = 0; @@ -65,8 +65,7 @@ impl<'tcx> TaintSet<'tcx> { "we never add verifications while doing higher-ranked things", ) } - &Purged | &AddCombination(..) | &AddVar(..) | &OpenSnapshot - | &CommitedSnapshot => {} + &Purged | &AddCombination(..) | &AddVar(..) => {} } } } diff --git a/src/librustc/infer/resolve.rs b/src/librustc/infer/resolve.rs index 0ef9761857..7a1ee85acd 100644 --- a/src/librustc/infer/resolve.rs +++ b/src/librustc/infer/resolve.rs @@ -25,8 +25,9 @@ pub struct OpportunisticTypeResolver<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { } impl<'a, 'gcx, 'tcx> OpportunisticTypeResolver<'a, 'gcx, 'tcx> { + #[inline] pub fn new(infcx: &'a InferCtxt<'a, 'gcx, 'tcx>) -> Self { - OpportunisticTypeResolver { infcx: infcx } + OpportunisticTypeResolver { infcx } } } @@ -54,7 +55,7 @@ pub struct OpportunisticTypeAndRegionResolver<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { impl<'a, 'gcx, 'tcx> OpportunisticTypeAndRegionResolver<'a, 'gcx, 'tcx> { pub fn new(infcx: &'a InferCtxt<'a, 'gcx, 'tcx>) -> Self { - OpportunisticTypeAndRegionResolver { infcx: infcx } + OpportunisticTypeAndRegionResolver { infcx } } } diff --git a/src/librustc/infer/sub.rs b/src/librustc/infer/sub.rs index 048810c042..3b0f9a5e54 100644 --- a/src/librustc/infer/sub.rs +++ b/src/librustc/infer/sub.rs @@ -84,8 +84,8 @@ impl<'combine, 'infcx, 'gcx, 'tcx> TypeRelation<'infcx, 'gcx, 'tcx> // Shouldn't have any LBR here, so we can safely put // this under a binder below without fear of accidental // capture. - assert!(!a.has_escaping_regions()); - assert!(!b.has_escaping_regions()); + assert!(!a.has_escaping_bound_vars()); + assert!(!b.has_escaping_bound_vars()); // can't make progress on `A <: B` if both A and B are // type variables, so record an obligation. 
We also diff --git a/src/librustc/infer/type_variable.rs b/src/librustc/infer/type_variable.rs index 970b6e096f..5624961ea6 100644 --- a/src/librustc/infer/type_variable.rs +++ b/src/librustc/infer/type_variable.rs @@ -72,7 +72,7 @@ pub type TypeVariableMap = FxHashMap; struct TypeVariableData { origin: TypeVariableOrigin, - diverging: bool + diverging: bool, } #[derive(Copy, Clone, Debug)] @@ -169,7 +169,7 @@ impl<'tcx> TypeVariableTable<'tcx> { // Hack: we only need this so that `types_escaping_snapshot` // can see what has been unified; see the Delegate impl for // more details. - self.values.record(Instantiate { vid: vid }); + self.values.record(Instantiate { vid }); } /// Creates a new type variable. @@ -320,7 +320,7 @@ impl<'tcx> TypeVariableTable<'tcx> { /// but which have only been unified since `s` started, and /// return the types with which they were unified. So if we had /// a type variable `V0`, then we started the snapshot, then we - /// created a type variable `V1`, unifed `V0` with `T0`, and + /// created a type variable `V1`, unified `V0` with `T0`, and /// unified `V1` with `T1`, this function would return `{T0}`. pub fn types_escaping_snapshot(&mut self, s: &Snapshot<'tcx>) -> Vec> { let mut new_elem_threshold = u32::MAX; diff --git a/src/librustc/infer/unify_key.rs b/src/librustc/infer/unify_key.rs index cdc92877a5..f8001e085c 100644 --- a/src/librustc/infer/unify_key.rs +++ b/src/librustc/infer/unify_key.rs @@ -43,7 +43,7 @@ impl UnifyValue for RegionVidKey { value2.min_vid }; - Ok(RegionVidKey { min_vid: min_vid }) + Ok(RegionVidKey { min_vid }) } } diff --git a/src/librustc/lib.rs b/src/librustc/lib.rs index d8defabd3f..ddb0c5bf22 100644 --- a/src/librustc/lib.rs +++ b/src/librustc/lib.rs @@ -30,7 +30,7 @@ //! //! For more information about how rustc works, see the [rustc guide]. //! -//! [rustc guide]: https://rust-lang-nursery.github.io/rustc-guide/ +//! [rustc guide]: https://rust-lang.github.io/rustc-guide/ //! //! # Note //! @@ -42,7 +42,6 @@ #![feature(box_patterns)] #![feature(box_syntax)] -#![cfg_attr(stage0, feature(min_const_fn))] #![feature(core_intrinsics)] #![feature(drain_filter)] #![cfg_attr(windows, feature(libc))] @@ -67,9 +66,7 @@ #![feature(step_trait)] #![feature(integer_atomics)] #![feature(test)] -#![cfg_attr(stage0, feature(impl_header_lifetime_elision))] #![feature(in_band_lifetimes)] -#![feature(macro_at_most_once_rep)] #![feature(crate_visibility_modifier)] #![feature(transpose_result)] diff --git a/src/librustc/lint/builtin.rs b/src/librustc/lint/builtin.rs index 266b1c4d4a..22f2023eef 100644 --- a/src/librustc/lint/builtin.rs +++ b/src/librustc/lint/builtin.rs @@ -300,12 +300,6 @@ declare_lint! { "detects labels that are never used" } -declare_lint! { - pub DUPLICATE_ASSOCIATED_TYPE_BINDINGS, - Warn, - "warns about duplicate associated type bindings in generics" -} - declare_lint! { pub DUPLICATE_MACRO_EXPORTS, Deny, @@ -324,6 +318,12 @@ declare_lint! { "warn about missing code example in an item's documentation" } +declare_lint! { + pub PRIVATE_DOC_TESTS, + Allow, + "warn about doc test in private item" +} + declare_lint! 
{ pub WHERE_CLAUSES_OBJECT_SAFETY, Warn, @@ -418,10 +418,10 @@ impl LintPass for HardwiredLints { ABSOLUTE_PATHS_NOT_STARTING_WITH_CRATE, UNSTABLE_NAME_COLLISIONS, IRREFUTABLE_LET_PATTERNS, - DUPLICATE_ASSOCIATED_TYPE_BINDINGS, DUPLICATE_MACRO_EXPORTS, INTRA_DOC_LINK_RESOLUTION_FAILURE, MISSING_DOC_CODE_EXAMPLES, + PRIVATE_DOC_TESTS, WHERE_CLAUSES_OBJECT_SAFETY, PROC_MACRO_DERIVE_RESOLUTION_FALLBACK, MACRO_USE_EXTERN_CRATE, diff --git a/src/librustc/lint/context.rs b/src/librustc/lint/context.rs index ebb495fdfc..469d77403a 100644 --- a/src/librustc/lint/context.rs +++ b/src/librustc/lint/context.rs @@ -783,11 +783,11 @@ impl<'a, 'tcx> LateContext<'a, 'tcx> { } } -impl<'a, 'tcx> LayoutOf for &'a LateContext<'a, 'tcx> { +impl<'a, 'tcx> LayoutOf for LateContext<'a, 'tcx> { type Ty = Ty<'tcx>; type TyLayout = Result, LayoutError<'tcx>>; - fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout { + fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout { self.tcx.layout_of(self.param_env.and(ty)) } } @@ -1233,7 +1233,7 @@ pub fn check_ast_crate( let (passes, buffered) = if pre_expansion { ( sess.lint_store.borrow_mut().pre_expansion_passes.take(), - LintBuffer::new(), + LintBuffer::default(), ) } else { ( diff --git a/src/librustc/lint/levels.rs b/src/librustc/lint/levels.rs index 6a4f734674..cfb9f04c4c 100644 --- a/src/librustc/lint/levels.rs +++ b/src/librustc/lint/levels.rs @@ -21,6 +21,7 @@ use rustc_data_structures::stable_hasher::{HashStable, ToStableHashKey, use session::Session; use syntax::ast; use syntax::attr; +use syntax::feature_gate; use syntax::source_map::MultiSpan; use syntax::symbol::Symbol; use util::nodemap::FxHashMap; @@ -188,7 +189,7 @@ impl<'a> LintLevelsBuilder<'a> { /// This function will perform a number of tasks: /// /// * It'll validate all lint-related attributes in `attrs` - /// * It'll mark all lint-related attriutes as used + /// * It'll mark all lint-related attributes as used /// * Lint levels will be updated based on the attributes provided /// * Lint attributes are validated, e.g. a #[forbid] can't be switched to /// #[allow] @@ -199,8 +200,7 @@ impl<'a> LintLevelsBuilder<'a> { let store = self.sess.lint_store.borrow(); let sess = self.sess; let bad_attr = |span| { - span_err!(sess, span, E0452, - "malformed lint attribute"); + struct_span_err!(sess, span, E0452, "malformed lint attribute") }; for attr in attrs { let level = match Level::from_str(&attr.name().as_str()) { @@ -211,19 +211,76 @@ impl<'a> LintLevelsBuilder<'a> { let meta = unwrap_or!(attr.meta(), continue); attr::mark_used(attr); - let metas = if let Some(metas) = meta.meta_item_list() { + let mut metas = if let Some(metas) = meta.meta_item_list() { metas } else { - bad_attr(meta.span); - continue + let mut err = bad_attr(meta.span); + err.emit(); + continue; }; + if metas.is_empty() { + // FIXME (#55112): issue unused-attributes lint for `#[level()]` + continue; + } + + // Before processing the lint names, look for a reason (RFC 2383) + // at the end. 
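
For context, a hedged sketch of the attribute form this reason-parsing accepts (RFC 2383). The lint name and reason text are invented for illustration, and the `lint_reasons` feature gate is required exactly as enforced by the code above (nightly-only at the time of this patch):

```rust
#![feature(lint_reasons)]

// The reason must come last in the meta list, after the lint name(s),
// which matches the "reslice meta list to exclude it" logic above.
#[allow(unused_variables, reason = "binding kept to mirror the other match arms")]
fn example() {
    let spare = 0u32;
}
```
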
+ let mut reason = None; + let tail_li = &metas[metas.len()-1]; + if let Some(item) = tail_li.meta_item() { + match item.node { + ast::MetaItemKind::Word => {} // actual lint names handled later + ast::MetaItemKind::NameValue(ref name_value) => { + let gate_reasons = !self.sess.features_untracked().lint_reasons; + if item.ident == "reason" { + // found reason, reslice meta list to exclude it + metas = &metas[0..metas.len()-1]; + // FIXME (#55112): issue unused-attributes lint if we thereby + // don't have any lint names (`#[level(reason = "foo")]`) + if let ast::LitKind::Str(rationale, _) = name_value.node { + if gate_reasons { + feature_gate::emit_feature_err( + &self.sess.parse_sess, + "lint_reasons", + item.span, + feature_gate::GateIssue::Language, + "lint reasons are experimental" + ); + } else { + reason = Some(rationale); + } + } else { + let mut err = bad_attr(name_value.span); + err.help("reason must be a string literal"); + err.emit(); + } + } else { + let mut err = bad_attr(item.span); + err.emit(); + } + }, + ast::MetaItemKind::List(_) => { + let mut err = bad_attr(item.span); + err.emit(); + } + } + } + for li in metas { let word = match li.word() { Some(word) => word, None => { - bad_attr(li.span); - continue + let mut err = bad_attr(li.span); + if let Some(item) = li.meta_item() { + if let ast::MetaItemKind::NameValue(_) = item.node { + if item.ident == "reason" { + err.help("reason in lint attribute must come last"); + } + } + } + err.emit(); + continue; } }; let tool_name = if let Some(lint_tool) = word.is_scoped() { @@ -245,7 +302,7 @@ impl<'a> LintLevelsBuilder<'a> { let name = word.name(); match store.check_lint_name(&name.as_str(), tool_name) { CheckLintNameResult::Ok(ids) => { - let src = LintSource::Node(name, li.span); + let src = LintSource::Node(name, li.span, reason); for id in ids { specs.insert(*id, (level, src)); } @@ -255,7 +312,9 @@ impl<'a> LintLevelsBuilder<'a> { match result { Ok(ids) => { let complete_name = &format!("{}::{}", tool_name.unwrap(), name); - let src = LintSource::Node(Symbol::intern(complete_name), li.span); + let src = LintSource::Node( + Symbol::intern(complete_name), li.span, reason + ); for id in ids { specs.insert(*id, (level, src)); } @@ -286,7 +345,9 @@ impl<'a> LintLevelsBuilder<'a> { Applicability::MachineApplicable, ).emit(); - let src = LintSource::Node(Symbol::intern(&new_lint_name), li.span); + let src = LintSource::Node( + Symbol::intern(&new_lint_name), li.span, reason + ); for id in ids { specs.insert(*id, (level, src)); } @@ -368,11 +429,11 @@ impl<'a> LintLevelsBuilder<'a> { }; let forbidden_lint_name = match forbid_src { LintSource::Default => id.to_string(), - LintSource::Node(name, _) => name.to_string(), + LintSource::Node(name, _, _) => name.to_string(), LintSource::CommandLine(name) => name.to_string(), }; let (lint_attr_name, lint_attr_span) = match *src { - LintSource::Node(name, span) => (name, span), + LintSource::Node(name, span, _) => (name, span), _ => continue, }; let mut diag_builder = struct_span_err!(self.sess, @@ -384,15 +445,19 @@ impl<'a> LintLevelsBuilder<'a> { forbidden_lint_name); diag_builder.span_label(lint_attr_span, "overruled by previous forbid"); match forbid_src { - LintSource::Default => &mut diag_builder, - LintSource::Node(_, forbid_source_span) => { + LintSource::Default => {}, + LintSource::Node(_, forbid_source_span, reason) => { diag_builder.span_label(forbid_source_span, - "`forbid` level set here") + "`forbid` level set here"); + if let Some(rationale) = reason { + 
diag_builder.note(&rationale.as_str()); + } }, LintSource::CommandLine(_) => { - diag_builder.note("`forbid` lint level was set on command line") + diag_builder.note("`forbid` lint level was set on command line"); } - }.emit(); + } + diag_builder.emit(); // don't set a separate error for every lint in the group break } diff --git a/src/librustc/lint/mod.rs b/src/librustc/lint/mod.rs index c1e439ce60..4b878b8625 100644 --- a/src/librustc/lint/mod.rs +++ b/src/librustc/lint/mod.rs @@ -470,7 +470,7 @@ pub enum LintSource { Default, /// Lint level was set by an attribute. - Node(ast::Name, Span), + Node(ast::Name, Span, Option /* RFC 2383 reason */), /// Lint level was set by a command-line flag. CommandLine(Symbol), @@ -478,7 +478,7 @@ pub enum LintSource { impl_stable_hash_for!(enum self::LintSource { Default, - Node(name, span), + Node(name, span, reason), CommandLine(text) }); @@ -490,15 +490,12 @@ mod levels; pub use self::levels::{LintLevelSets, LintLevelMap}; +#[derive(Default)] pub struct LintBuffer { map: NodeMap>, } impl LintBuffer { - pub fn new() -> LintBuffer { - LintBuffer { map: NodeMap() } - } - pub fn add_lint(&mut self, lint: &'static Lint, id: ast::NodeId, @@ -578,7 +575,10 @@ pub fn struct_lint_level<'a>(sess: &'a Session, hyphen_case_flag_val)); } } - LintSource::Node(lint_attr_name, src) => { + LintSource::Node(lint_attr_name, src, reason) => { + if let Some(rationale) = reason { + err.note(&rationale.as_str()); + } sess.diag_span_note_once(&mut err, DiagnosticMessageId::from(lint), src, "lint level defined here"); if lint_attr_name.as_str() != name { diff --git a/src/librustc/macros.rs b/src/librustc/macros.rs index 897e9cc2a3..50375435eb 100644 --- a/src/librustc/macros.rs +++ b/src/librustc/macros.rs @@ -62,16 +62,6 @@ macro_rules! span_bug { }) } -#[macro_export] -macro_rules! static_assert { - ($name:ident: $test:expr) => { - // Use the bool to access an array such that if the bool is false, the access - // is out-of-bounds. - #[allow(dead_code)] - static $name: () = [()][!$test as usize]; - } -} - #[macro_export] macro_rules! __impl_stable_hash_field { ($field:ident, $ctx:expr, $hasher:expr) => ($field.hash_stable($ctx, $hasher)); @@ -81,62 +71,73 @@ macro_rules! __impl_stable_hash_field { #[macro_export] macro_rules! impl_stable_hash_for { + // Enums // FIXME(mark-i-m): Some of these should be `?` rather than `*`. See the git blame and change // them back when `?` is supported again. - (enum $enum_name:path { $( $variant:ident $( ( $($field:ident $(-> $delegate:tt)*),* ) )* ),* $(,)* }) => { - impl<'a, 'tcx> ::rustc_data_structures::stable_hasher::HashStable<$crate::ich::StableHashingContext<'a>> for $enum_name { + (enum $enum_name:path { + $( $variant:ident + // this incorrectly allows specifying both tuple-like and struct-like fields, as in `Variant(a,b){c,d}`, + // when it should be only one or the other + $( ( $($field:ident $(-> $delegate:tt)*),* ) )* + $( { $($named_field:ident $(-> $named_delegate:tt)*),* } )* + ),* $(,)* + }) => { + impl_stable_hash_for!( + impl<> for enum $enum_name [ $enum_name ] { $( $variant + $( ( $($field $(-> $delegate)*),* ) )* + $( { $($named_field $(-> $named_delegate)*),* } )* + ),* } + ); + }; + // We want to use the enum name both in the `impl ... for $enum_name` as well as for + // importing all the variants. 
Unfortunately it seems we have to take the name + // twice for this purpose + (impl<$($lt:lifetime $(: $lt_bound:lifetime)* ),* $(,)* $($T:ident),* $(,)*> + for enum $enum_name:path + [ $enum_path:path ] + { + $( $variant:ident + // this incorrectly allows specifying both tuple-like and struct-like fields, as in `Variant(a,b){c,d}`, + // when it should be only one or the other + $( ( $($field:ident $(-> $delegate:tt)*),* ) )* + $( { $($named_field:ident $(-> $named_delegate:tt)*),* } )* + ),* $(,)* + }) => { + impl<'a, $($lt $(: $lt_bound)*,)* $($T,)*> + ::rustc_data_structures::stable_hasher::HashStable<$crate::ich::StableHashingContext<'a>> + for $enum_name + where $($T: ::rustc_data_structures::stable_hasher::HashStable<$crate::ich::StableHashingContext<'a>>),* + { #[inline] fn hash_stable(&self, __ctx: &mut $crate::ich::StableHashingContext<'a>, __hasher: &mut ::rustc_data_structures::stable_hasher::StableHasher) { - use $enum_name::*; + use $enum_path::*; ::std::mem::discriminant(self).hash_stable(__ctx, __hasher); match *self { $( - $variant $( ( $(ref $field),* ) )* => { + $variant $( ( $(ref $field),* ) )* $( { $(ref $named_field),* } )* => { $($( __impl_stable_hash_field!($field, __ctx, __hasher $(, $delegate)*) );*)* + $($( __impl_stable_hash_field!($named_field, __ctx, __hasher $(, $named_delegate)*) );*)* } )* } } } }; + // Structs // FIXME(mark-i-m): same here. (struct $struct_name:path { $($field:ident $(-> $delegate:tt)*),* $(,)* }) => { - impl<'a, 'tcx> ::rustc_data_structures::stable_hasher::HashStable<$crate::ich::StableHashingContext<'a>> for $struct_name { - #[inline] - fn hash_stable(&self, - __ctx: &mut $crate::ich::StableHashingContext<'a>, - __hasher: &mut ::rustc_data_structures::stable_hasher::StableHasher) { - let $struct_name { - $(ref $field),* - } = *self; - - $( __impl_stable_hash_field!($field, __ctx, __hasher $(, $delegate)*) );* - } - } + impl_stable_hash_for!( + impl<'tcx> for struct $struct_name { $($field $(-> $delegate)*),* } + ); }; - // FIXME(mark-i-m): same here. - (tuple_struct $struct_name:path { $($field:ident $(-> $delegate:tt)*),* $(,)* }) => { - impl<'a, 'tcx> ::rustc_data_structures::stable_hasher::HashStable<$crate::ich::StableHashingContext<'a>> for $struct_name { - #[inline] - fn hash_stable(&self, - __ctx: &mut $crate::ich::StableHashingContext<'a>, - __hasher: &mut ::rustc_data_structures::stable_hasher::StableHasher) { - let $struct_name ( - $(ref $field),* - ) = *self; - - $( __impl_stable_hash_field!($field, __ctx, __hasher $(, $delegate)*) );* - } - } - }; - - (impl<$tcx:lifetime $(, $T:ident)*> for struct $struct_name:path { - $($field:ident),* $(,)* + (impl<$($lt:lifetime $(: $lt_bound:lifetime)* ),* $(,)* $($T:ident),* $(,)*> for struct $struct_name:path { + $($field:ident $(-> $delegate:tt)*),* $(,)* }) => { - impl<'a, $tcx, $($T,)*> ::rustc_data_structures::stable_hasher::HashStable<$crate::ich::StableHashingContext<'a>> for $struct_name + impl<'a, $($lt $(: $lt_bound)*,)* $($T,)*> + ::rustc_data_structures::stable_hasher::HashStable<$crate::ich::StableHashingContext<'a>> for $struct_name where $($T: ::rustc_data_structures::stable_hasher::HashStable<$crate::ich::StableHashingContext<'a>>),* { #[inline] @@ -147,7 +148,33 @@ macro_rules! 
impl_stable_hash_for { $(ref $field),* } = *self; - $( $field.hash_stable(__ctx, __hasher));* + $( __impl_stable_hash_field!($field, __ctx, __hasher $(, $delegate)*) );* + } + } + }; + // Tuple structs + // We cannot use normale parentheses here, the parser won't allow it + // FIXME(mark-i-m): same here. + (tuple_struct $struct_name:path { $($field:ident $(-> $delegate:tt)*),* $(,)* }) => { + impl_stable_hash_for!( + impl<'tcx> for tuple_struct $struct_name { $($field $(-> $delegate)*),* } + ); + }; + (impl<$($lt:lifetime $(: $lt_bound:lifetime)* ),* $(,)* $($T:ident),* $(,)*> + for tuple_struct $struct_name:path { $($field:ident $(-> $delegate:tt)*),* $(,)* }) => { + impl<'a, $($lt $(: $lt_bound)*,)* $($T,)*> + ::rustc_data_structures::stable_hasher::HashStable<$crate::ich::StableHashingContext<'a>> for $struct_name + where $($T: ::rustc_data_structures::stable_hasher::HashStable<$crate::ich::StableHashingContext<'a>>),* + { + #[inline] + fn hash_stable(&self, + __ctx: &mut $crate::ich::StableHashingContext<'a>, + __hasher: &mut ::rustc_data_structures::stable_hasher::StableHasher) { + let $struct_name ( + $(ref $field),* + ) = *self; + + $( __impl_stable_hash_field!($field, __ctx, __hasher $(, $delegate)*) );* } } }; diff --git a/src/librustc/middle/borrowck.rs b/src/librustc/middle/borrowck.rs index c8d513a59f..633c584137 100644 --- a/src/librustc/middle/borrowck.rs +++ b/src/librustc/middle/borrowck.rs @@ -18,9 +18,15 @@ use rustc_data_structures::stable_hasher::{HashStable, StableHasher, #[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable)] pub enum SignalledError { SawSomeError, NoErrorsSeen } +impl Default for SignalledError { + fn default() -> SignalledError { + SignalledError::NoErrorsSeen + } +} + impl_stable_hash_for!(enum self::SignalledError { SawSomeError, NoErrorsSeen }); -#[derive(Debug, RustcEncodable, RustcDecodable)] +#[derive(Debug, Default, RustcEncodable, RustcDecodable)] pub struct BorrowCheckResult { pub used_mut_nodes: FxHashSet, pub signalled_any_error: SignalledError, diff --git a/src/librustc/middle/dead.rs b/src/librustc/middle/dead.rs index 81c48740e1..282b5d13e2 100644 --- a/src/librustc/middle/dead.rs +++ b/src/librustc/middle/dead.rs @@ -291,10 +291,8 @@ fn has_allow_dead_code_or_lang_attr(tcx: TyCtxt<'_, '_, '_>, return true; } - // (To be) stable attribute for #[lang = "panic_impl"] - if attr::contains_name(attrs, "panic_implementation") || - attr::contains_name(attrs, "panic_handler") - { + // Stable attribute for #[lang = "panic_impl"] + if attr::contains_name(attrs, "panic_handler") { return true; } diff --git a/src/librustc/middle/dependency_format.rs b/src/librustc/middle/dependency_format.rs index 8b5c6d147d..549a848a39 100644 --- a/src/librustc/middle/dependency_format.rs +++ b/src/librustc/middle/dependency_format.rs @@ -63,7 +63,6 @@ use hir::def_id::CrateNum; -use session; use session::config; use ty::TyCtxt; use middle::cstore::{self, DepKind}; @@ -94,12 +93,11 @@ pub enum Linkage { pub fn calculate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { let sess = &tcx.sess; - let mut fmts = FxHashMap::default(); - for &ty in sess.crate_types.borrow().iter() { + let fmts = sess.crate_types.borrow().iter().map(|&ty| { let linkage = calculate_type(tcx, ty); verify_ok(tcx, &linkage); - fmts.insert(ty, linkage); - } + (ty, linkage) + }).collect::>(); sess.abort_if_errors(); sess.dependency_formats.set(fmts); } @@ -129,9 +127,8 @@ fn calculate_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, sess.crt_static() => Linkage::Static, config::CrateType::Executable 
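
To illustrate what the reorganized macro arms accept, here are two invocation forms taken from elsewhere in this same patch; the new generic `impl<...> for struct ...` and brace-field enum arms follow the same shape, only with explicit lifetime/type parameters:

```rust
// Fieldless enum variants (this invocation appears unchanged in middle/borrowck.rs):
impl_stable_hash_for!(enum self::SignalledError { SawSomeError, NoErrorsSeen });

// Plain struct fields (this invocation is added for the interpreter's UndefMask below):
impl_stable_hash_for!(struct mir::interpret::UndefMask { blocks, len });
```
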
=> Linkage::Dynamic, - // proc-macro crates are required to be dylibs, and they're currently - // required to link to libsyntax as well. - config::CrateType::ProcMacro => Linkage::Dynamic, + // proc-macro crates are mostly cdylibs, but we also need metadata. + config::CrateType::ProcMacro => Linkage::Static, // No linkage happens with rlibs, we just needed the metadata (which we // got long ago), so don't bother with anything. @@ -225,7 +222,6 @@ fn calculate_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, // quite yet, so do so here. activate_injected_dep(*sess.injected_panic_runtime.get(), &mut ret, &|cnum| tcx.is_panic_runtime(cnum)); - activate_injected_allocator(sess, &mut ret); // When dylib B links to dylib A, then when using B we must also link to A. // It could be the case, however, that the rlib for A is present (hence we @@ -304,7 +300,6 @@ fn attempt_static<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Option, } } -fn activate_injected_allocator(sess: &session::Session, - list: &mut DependencyList) { - let cnum = match sess.injected_allocator.get() { - Some(cnum) => cnum, - None => return, - }; - let idx = cnum.as_usize() - 1; - if list[idx] == Linkage::NotLinked { - list[idx] = Linkage::Static; - } -} - // After the linkage for a crate has been determined we need to verify that // there's only going to be one allocator in the output. fn verify_ok<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, list: &[Linkage]) { diff --git a/src/librustc/middle/expr_use_visitor.rs b/src/librustc/middle/expr_use_visitor.rs index 7e9b26bbf7..5b92bfe6ad 100644 --- a/src/librustc/middle/expr_use_visitor.rs +++ b/src/librustc/middle/expr_use_visitor.rs @@ -938,7 +938,7 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { let var_hir_id = self.tcx().hir.node_to_hir_id(freevar.var_id()); let closure_def_id = self.tcx().hir.local_def_id(closure_expr.id); let upvar_id = ty::UpvarId { - var_id: var_hir_id, + var_path: ty::UpvarPath { hir_id: var_hir_id }, closure_expr_id: closure_def_id.to_local(), }; let upvar_capture = self.mc.tables.upvar_capture(upvar_id); diff --git a/src/librustc/middle/intrinsicck.rs b/src/librustc/middle/intrinsicck.rs index 9d54e79846..d2be0e4dcb 100644 --- a/src/librustc/middle/intrinsicck.rs +++ b/src/librustc/middle/intrinsicck.rs @@ -11,9 +11,10 @@ use hir::def::Def; use hir::def_id::DefId; use ty::{self, Ty, TyCtxt}; -use ty::layout::{LayoutError, Pointer, SizeSkeleton}; +use ty::layout::{LayoutError, Pointer, SizeSkeleton, VariantIdx}; use rustc_target::spec::abi::Abi::RustIntrinsic; +use rustc_data_structures::indexed_vec::Idx; use syntax_pos::Span; use hir::intravisit::{self, Visitor, NestedVisitorMap}; use hir; @@ -48,10 +49,13 @@ fn unpack_option_like<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, if def.variants.len() == 2 && !def.repr.c() && def.repr.int.is_none() { let data_idx; - if def.variants[0].fields.is_empty() { - data_idx = 1; - } else if def.variants[1].fields.is_empty() { - data_idx = 0; + let one = VariantIdx::new(1); + let zero = VariantIdx::new(0); + + if def.variants[zero].fields.is_empty() { + data_idx = one; + } else if def.variants[one].fields.is_empty() { + data_idx = zero; } else { return ty; } @@ -84,7 +88,7 @@ impl<'a, 'tcx> ExprVisitor<'a, 'tcx> { // `Option` to present a clearer error. 
let from = unpack_option_like(self.tcx.global_tcx(), from); if let (&ty::FnDef(..), SizeSkeleton::Known(size_to)) = (&from.sty, sk_to) { - if size_to == Pointer.size(self.tcx) { + if size_to == Pointer.size(&self.tcx) { struct_span_err!(self.tcx.sess, span, E0591, "can't transmute zero-sized type") .note(&format!("source type: {}", from)) diff --git a/src/librustc/middle/lang_items.rs b/src/librustc/middle/lang_items.rs index 45de958e72..e7a8baf738 100644 --- a/src/librustc/middle/lang_items.rs +++ b/src/librustc/middle/lang_items.rs @@ -204,9 +204,7 @@ pub fn extract(attrs: &[ast::Attribute]) -> Option<(Symbol, Span)> { if let Some(value) = attribute.value_str() { return Some((value, attribute.span)); } - } else if attribute.check_name("panic_implementation") || - attribute.check_name("panic_handler") - { + } else if attribute.check_name("panic_handler") { return Some((Symbol::intern("panic_impl"), attribute.span)) } else if attribute.check_name("alloc_error_handler") { return Some((Symbol::intern("oom"), attribute.span)) @@ -271,6 +269,7 @@ language_item_table! { DropTraitLangItem, "drop", drop_trait, Target::Trait; CoerceUnsizedTraitLangItem, "coerce_unsized", coerce_unsized_trait, Target::Trait; + DispatchFromDynTraitLangItem,"dispatch_from_dyn", dispatch_from_dyn_trait, Target::Trait; AddTraitLangItem, "add", add_trait, Target::Trait; SubTraitLangItem, "sub", sub_trait, Target::Trait; @@ -298,6 +297,7 @@ language_item_table! { IndexMutTraitLangItem, "index_mut", index_mut_trait, Target::Trait; UnsafeCellTypeLangItem, "unsafe_cell", unsafe_cell_type, Target::Struct; + VaListTypeLangItem, "va_list", va_list, Target::Struct; DerefTraitLangItem, "deref", deref_trait, Target::Trait; DerefMutTraitLangItem, "deref_mut", deref_mut_trait, Target::Trait; diff --git a/src/librustc/middle/liveness.rs b/src/librustc/middle/liveness.rs index 1b258a2346..54a0192d2e 100644 --- a/src/librustc/middle/liveness.rs +++ b/src/librustc/middle/liveness.rs @@ -272,9 +272,9 @@ impl<'a, 'tcx> IrMaps<'a, 'tcx> { tcx, num_live_nodes: 0, num_vars: 0, - live_node_map: HirIdMap(), - variable_map: HirIdMap(), - capture_info_map: NodeMap(), + live_node_map: HirIdMap::default(), + variable_map: HirIdMap::default(), + capture_info_map: Default::default(), var_kinds: Vec::new(), lnks: Vec::new(), } @@ -397,7 +397,7 @@ fn visit_fn<'a, 'tcx: 'a>(ir: &mut IrMaps<'a, 'tcx>, fn add_from_pat<'a, 'tcx>(ir: &mut IrMaps<'a, 'tcx>, pat: &P) { // For struct patterns, take note of which fields used shorthand // (`x` rather than `x: x`). 
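
For reference, a minimal sketch of the now-stable attribute that this lang-item extraction maps to `panic_impl` (a `no_std` crate is assumed; this is the canonical shape of a panic handler, not code from this patch):

```rust
#![no_std]

use core::panic::PanicInfo;

// `#[panic_handler]` is recognized by `extract` above and resolved to the
// `panic_impl` lang item; the old `#[panic_implementation]` spelling is gone.
#[panic_handler]
fn panic(_info: &PanicInfo) -> ! {
    loop {}
}
```
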
- let mut shorthand_field_ids = HirIdSet(); + let mut shorthand_field_ids = HirIdSet::default(); let mut pats = VecDeque::new(); pats.push_back(pat); while let Some(pat) = pats.pop_front() { @@ -691,8 +691,8 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { s: specials, successors: vec![invalid_node(); num_live_nodes], rwu_table: RWUTable::new(num_live_nodes * num_vars), - break_ln: NodeMap(), - cont_ln: NodeMap(), + break_ln: Default::default(), + cont_ln: Default::default(), } } diff --git a/src/librustc/middle/mem_categorization.rs b/src/librustc/middle/mem_categorization.rs index 13e6f7a4c7..cadf0c42d2 100644 --- a/src/librustc/middle/mem_categorization.rs +++ b/src/librustc/middle/mem_categorization.rs @@ -76,6 +76,7 @@ use hir::def::{Def, CtorKind}; use ty::adjustment; use ty::{self, Ty, TyCtxt}; use ty::fold::TypeFoldable; +use ty::layout::VariantIdx; use hir::{MutImmutable, MutMutable, PatKind}; use hir::pat_util::EnumerateAndAdjustIterator; @@ -87,12 +88,14 @@ use std::borrow::Cow; use std::fmt; use std::hash::{Hash, Hasher}; use rustc_data_structures::sync::Lrc; +use rustc_data_structures::indexed_vec::Idx; use std::rc::Rc; use util::nodemap::ItemLocalSet; #[derive(Clone, Debug, PartialEq)] pub enum Categorization<'tcx> { Rvalue(ty::Region<'tcx>), // temporary val, argument is its scope + ThreadLocal(ty::Region<'tcx>), // value that cannot move, but still restricted in scope StaticItem, Upvar(Upvar), // upvar referenced by closure env Local(ast::NodeId), // local variable @@ -226,7 +229,7 @@ impl<'tcx> cmt_<'tcx> { } _ => { assert_eq!(adt_def.variants.len(), 1); - &adt_def.variants[0] + &adt_def.variants[VariantIdx::new(0)] } }; Some((adt_def, &variant_def.fields[field_index])) @@ -268,6 +271,7 @@ impl<'tcx> cmt_<'tcx> { Categorization::Deref(ref base_cmt, _) => { base_cmt.immutability_blame() } + Categorization::ThreadLocal(..) | Categorization::StaticItem => { // Do we want to do something here? None @@ -715,17 +719,23 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { } Def::Static(def_id, mutbl) => { - // `#[thread_local]` statics may not outlive the current function. - for attr in &self.tcx.get_attrs(def_id)[..] { - if attr.check_name("thread_local") { - return Ok(self.cat_rvalue_node(hir_id, span, expr_ty)); - } - } + // `#[thread_local]` statics may not outlive the current function, but + // they also cannot be moved out of. + let is_thread_local = self.tcx.get_attrs(def_id)[..] + .iter() + .any(|attr| attr.check_name("thread_local")); + + let cat = if is_thread_local { + let re = self.temporary_scope(hir_id.local_id); + Categorization::ThreadLocal(re) + } else { + Categorization::StaticItem + }; Ok(cmt_ { hir_id, - span:span, - cat:Categorization::StaticItem, + span, + cat, mutbl: if mutbl { McDeclared } else { McImmutable}, ty:expr_ty, note: NoteNone @@ -808,7 +818,7 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { let closure_expr_def_id = self.tcx.hir.local_def_id(fn_node_id); let var_hir_id = self.tcx.hir.node_to_hir_id(var_id); let upvar_id = ty::UpvarId { - var_id: var_hir_id, + var_path: ty::UpvarPath { hir_id: var_hir_id }, closure_expr_id: closure_expr_def_id.to_local(), }; @@ -1408,6 +1418,7 @@ impl<'tcx> cmt_<'tcx> { match self.cat { Categorization::Rvalue(..) | Categorization::StaticItem | + Categorization::ThreadLocal(..) | Categorization::Local(..) | Categorization::Deref(_, UnsafePtr(..)) | Categorization::Deref(_, BorrowedPtr(..)) | @@ -1439,6 +1450,7 @@ impl<'tcx> cmt_<'tcx> { } Categorization::Rvalue(..) 
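
A hedged sketch of the kind of static the new `Categorization::ThreadLocal` variant describes (nightly-only; the `#[thread_local]` attribute is feature-gated, and the names here are invented):

```rust
#![feature(thread_local)]

#[thread_local]
static COUNTER: u32 = 0;

fn current() -> u32 {
    // Borrows of `COUNTER` may not outlive the current function, but the
    // value still cannot be moved out of the static, which is why it now
    // gets its own categorization instead of being treated as an rvalue.
    COUNTER
}
```
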
| + Categorization::ThreadLocal(..) | Categorization::Local(..) | Categorization::Upvar(..) | Categorization::Deref(_, UnsafePtr(..)) => { // yes, it's aliasable, but... @@ -1485,6 +1497,9 @@ impl<'tcx> cmt_<'tcx> { Categorization::StaticItem => { "static item".into() } + Categorization::ThreadLocal(..) => { + "thread-local static item".into() + } Categorization::Rvalue(..) => { "non-place".into() } diff --git a/src/librustc/middle/reachable.rs b/src/librustc/middle/reachable.rs index 8c3a3fb6dc..ab0094df0e 100644 --- a/src/librustc/middle/reachable.rs +++ b/src/librustc/middle/reachable.rs @@ -117,8 +117,9 @@ impl<'a, 'tcx> Visitor<'tcx> for ReachableContext<'a, 'tcx> { self.reachable_symbols.insert(node_id); } Some(def) => { - let def_id = def.def_id(); - if let Some(node_id) = self.tcx.hir.as_local_node_id(def_id) { + if let Some((node_id, def_id)) = def.opt_def_id().and_then(|def_id| { + self.tcx.hir.as_local_node_id(def_id).map(|node_id| (node_id, def_id)) + }) { if self.def_id_represents_local_inlined_item(def_id) { self.worklist.push(node_id); } else { @@ -408,7 +409,7 @@ fn reachable_set<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, crate_num: CrateNum) -> let mut reachable_context = ReachableContext { tcx, tables: &ty::TypeckTables::empty(None), - reachable_symbols: NodeSet(), + reachable_symbols: Default::default(), worklist: Vec::new(), any_library, }; diff --git a/src/librustc/middle/region.rs b/src/librustc/middle/region.rs index d00fbdeca2..35d1a4dd2c 100644 --- a/src/librustc/middle/region.rs +++ b/src/librustc/middle/region.rs @@ -14,7 +14,7 @@ //! For more information about how MIR-based region-checking works, //! see the [rustc guide]. //! -//! [rustc guide]: https://rust-lang-nursery.github.io/rustc-guide/mir/borrowck.html +//! [rustc guide]: https://rust-lang.github.io/rustc-guide/mir/borrowck.html use ich::{StableHashingContext, NodeIdHashingMode}; use util::nodemap::{FxHashMap, FxHashSet}; diff --git a/src/librustc/middle/resolve_lifetime.rs b/src/librustc/middle/resolve_lifetime.rs index 361abb1689..07054ee99a 100644 --- a/src/librustc/middle/resolve_lifetime.rs +++ b/src/librustc/middle/resolve_lifetime.rs @@ -425,8 +425,8 @@ fn resolve_lifetimes<'tcx>( fn krate<'tcx>(tcx: TyCtxt<'_, 'tcx, 'tcx>) -> NamedRegionMap { let krate = tcx.hir.krate(); let mut map = NamedRegionMap { - defs: NodeMap(), - late_bound: NodeSet(), + defs: Default::default(), + late_bound: Default::default(), object_lifetime_defaults: compute_object_lifetime_defaults(tcx), }; { @@ -437,8 +437,8 @@ fn krate<'tcx>(tcx: TyCtxt<'_, 'tcx, 'tcx>) -> NamedRegionMap { trait_ref_hack: false, is_in_fn_syntax: false, labels_in_fn: vec![], - xcrate_object_lifetime_defaults: DefIdMap(), - lifetime_uses: &mut DefIdMap(), + xcrate_object_lifetime_defaults: Default::default(), + lifetime_uses: &mut Default::default(), }; for (_, item) in &krate.items { visitor.visit_item(item); @@ -447,6 +447,17 @@ fn krate<'tcx>(tcx: TyCtxt<'_, 'tcx, 'tcx>) -> NamedRegionMap { map } +/// In traits, there is an implicit `Self` type parameter which comes before the generics. +/// We have to account for this when computing the index of the other generic parameters. +/// This function returns whether there is such an implicit parameter defined on the given item. +fn sub_items_have_self_param(node: &hir::ItemKind) -> bool { + match *node { + hir::ItemKind::Trait(..) | + hir::ItemKind::TraitAlias(..) 
=> true, + _ => false, + } +} + impl<'a, 'tcx> Visitor<'tcx> for LifetimeContext<'a, 'tcx> { fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> { NestedVisitorMap::All(&self.tcx.hir) @@ -522,8 +533,8 @@ impl<'a, 'tcx> Visitor<'tcx> for LifetimeContext<'a, 'tcx> { hir::ItemKind::Impl(..) => true, _ => false, }; - // These kinds of items have only early bound lifetime parameters. - let mut index = if let hir::ItemKind::Trait(..) = item.node { + // These kinds of items have only early-bound lifetime parameters. + let mut index = if sub_items_have_self_param(&item.node) { 1 // Self comes before lifetimes } else { 0 @@ -1267,7 +1278,7 @@ fn extract_labels(ctxt: &mut LifetimeContext<'_, '_>, body: &hir::Body) { fn compute_object_lifetime_defaults( tcx: TyCtxt<'_, '_, '_>, ) -> NodeMap> { - let mut map = NodeMap(); + let mut map = NodeMap::default(); for item in tcx.hir.krate().items.values() { match item.node { hir::ItemKind::Struct(_, ref generics) @@ -1421,7 +1432,7 @@ impl<'a, 'tcx> LifetimeContext<'a, 'tcx> { } = self; let labels_in_fn = replace(&mut self.labels_in_fn, vec![]); let xcrate_object_lifetime_defaults = - replace(&mut self.xcrate_object_lifetime_defaults, DefIdMap()); + replace(&mut self.xcrate_object_lifetime_defaults, DefIdMap::default()); let mut this = LifetimeContext { tcx: *tcx, map: map, @@ -1443,23 +1454,101 @@ impl<'a, 'tcx> LifetimeContext<'a, 'tcx> { /// helper method to determine the span to remove when suggesting the /// deletion of a lifetime fn lifetime_deletion_span(&self, name: ast::Ident, generics: &hir::Generics) -> Option { - if generics.params.len() == 1 { - // if sole lifetime, remove the `<>` brackets - Some(generics.span) - } else { - generics.params.iter().enumerate().find_map(|(i, param)| { - if param.name.ident() == name { - // We also want to delete a leading or trailing comma - // as appropriate - if i >= generics.params.len() - 1 { - Some(generics.params[i - 1].span.shrink_to_hi().to(param.span)) - } else { - Some(param.span.to(generics.params[i + 1].span.shrink_to_lo())) + generics.params.iter().enumerate().find_map(|(i, param)| { + if param.name.ident() == name { + let mut in_band = false; + if let hir::GenericParamKind::Lifetime { kind } = param.kind { + if let hir::LifetimeParamKind::InBand = kind { + in_band = true; } - } else { - None } - }) + if in_band { + Some(param.span) + } else { + if generics.params.len() == 1 { + // if sole lifetime, remove the entire `<>` brackets + Some(generics.span) + } else { + // if removing within `<>` brackets, we also want to + // delete a leading or trailing comma as appropriate + if i >= generics.params.len() - 1 { + Some(generics.params[i - 1].span.shrink_to_hi().to(param.span)) + } else { + Some(param.span.to(generics.params[i + 1].span.shrink_to_lo())) + } + } + } + } else { + None + } + }) + } + + // helper method to issue suggestions from `fn rah<'a>(&'a T)` to `fn rah(&T)` + fn suggest_eliding_single_use_lifetime( + &self, err: &mut DiagnosticBuilder<'_>, def_id: DefId, lifetime: &hir::Lifetime + ) { + // FIXME: future work: also suggest `impl Foo<'_>` for `impl<'a> Foo<'a>` + let name = lifetime.name.ident(); + let mut remove_decl = None; + if let Some(parent_def_id) = self.tcx.parent(def_id) { + if let Some(generics) = self.tcx.hir.get_generics(parent_def_id) { + remove_decl = self.lifetime_deletion_span(name, generics); + } + } + + let mut remove_use = None; + let mut find_arg_use_span = |inputs: &hir::HirVec| { + for input in inputs { + if let hir::TyKind::Rptr(lt, _) = 
input.node { + if lt.name.ident() == name { + // include the trailing whitespace between the ampersand and the type name + let lt_through_ty_span = lifetime.span.to(input.span.shrink_to_hi()); + remove_use = Some( + self.tcx.sess.source_map() + .span_until_non_whitespace(lt_through_ty_span) + ); + break; + } + } + } + }; + if let Node::Lifetime(hir_lifetime) = self.tcx.hir.get(lifetime.id) { + if let Some(parent) = self.tcx.hir.find(self.tcx.hir.get_parent(hir_lifetime.id)) { + match parent { + Node::Item(item) => { + if let hir::ItemKind::Fn(decl, _, _, _) = &item.node { + find_arg_use_span(&decl.inputs); + } + }, + Node::ImplItem(impl_item) => { + if let hir::ImplItemKind::Method(sig, _) = &impl_item.node { + find_arg_use_span(&sig.decl.inputs); + } + } + _ => {} + } + } + } + + if let (Some(decl_span), Some(use_span)) = (remove_decl, remove_use) { + // if both declaration and use deletion spans start at the same + // place ("start at" because the latter includes trailing + // whitespace), then this is an in-band lifetime + if decl_span.shrink_to_lo() == use_span.shrink_to_lo() { + err.span_suggestion_with_applicability( + use_span, + "elide the single-use lifetime", + String::new(), + Applicability::MachineApplicable, + ); + } else { + err.multipart_suggestion_with_applicability( + "elide the single-use lifetime", + vec![(decl_span, String::new()), (use_span, String::new())], + Applicability::MachineApplicable, + ); + } } } @@ -1484,7 +1573,7 @@ impl<'a, 'tcx> LifetimeContext<'a, 'tcx> { .collect(); // ensure that we issue lints in a repeatable order - def_ids.sort_by_key(|&def_id| self.tcx.def_path_hash(def_id)); + def_ids.sort_by_cached_key(|&def_id| self.tcx.def_path_hash(def_id)); for def_id in def_ids { debug!( @@ -1515,14 +1604,26 @@ impl<'a, 'tcx> LifetimeContext<'a, 'tcx> { _ => None, } { debug!("id = {:?} span = {:?} name = {:?}", node_id, span, name); + + if name == keywords::UnderscoreLifetime.ident() { + continue; + } + let mut err = self.tcx.struct_span_lint_node( lint::builtin::SINGLE_USE_LIFETIMES, id, span, &format!("lifetime parameter `{}` only used once", name), ); - err.span_label(span, "this lifetime..."); - err.span_label(lifetime.span, "...is used only here"); + + if span == lifetime.span { + // spans are the same for in-band lifetime declarations + err.span_label(span, "this lifetime is only used here"); + } else { + err.span_label(span, "this lifetime..."); + err.span_label(lifetime.span, "...is used only here"); + } + self.suggest_eliding_single_use_lifetime(&mut err, def_id, lifetime); err.emit(); } } @@ -1555,7 +1656,7 @@ impl<'a, 'tcx> LifetimeContext<'a, 'tcx> { if let Some(span) = unused_lt_span { err.span_suggestion_with_applicability( span, - "remove it", + "elide the unused lifetime", String::new(), Applicability::MachineApplicable, ); @@ -1602,8 +1703,8 @@ impl<'a, 'tcx> LifetimeContext<'a, 'tcx> { let mut index = 0; if let Some(parent_id) = parent_id { let parent = self.tcx.hir.expect_item(parent_id); - if let hir::ItemKind::Trait(..) = parent.node { - index += 1; // Self comes first. + if sub_items_have_self_param(&parent.node) { + index += 1; // Self comes before lifetimes } match parent.node { hir::ItemKind::Trait(_, _, ref generics, ..) 
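
A before/after sketch of the rewrite this helper suggests for the `single_use_lifetimes` lint, mirroring the `fn rah<'a>(&'a T)` to `fn rah(&T)` example in the comment above (function and parameter names are invented):

```rust
#![warn(single_use_lifetimes)]

// Before: `'a` is declared and then used exactly once, so the lint fires.
fn print_name<'a>(name: &'a str) {
    println!("{}", name);
}

// After applying the suggested elision: both the declaration in `<...>`
// and the single use on the reference are removed.
fn print_name_elided(name: &str) {
    println!("{}", name);
}
```
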
@@ -2640,9 +2741,7 @@ fn insert_late_bound_lifetimes( constrained_by_input.visit_ty(arg_ty); } - let mut appears_in_output = AllCollector { - regions: Default::default(), - }; + let mut appears_in_output = AllCollector::default(); intravisit::walk_fn_ret_ty(&mut appears_in_output, &decl.output); debug!( @@ -2654,9 +2753,7 @@ fn insert_late_bound_lifetimes( // // Subtle point: because we disallow nested bindings, we can just // ignore binders here and scrape up all names we see. - let mut appears_in_where_clause = AllCollector { - regions: Default::default(), - }; + let mut appears_in_where_clause = AllCollector::default(); appears_in_where_clause.visit_generics(generics); for param in &generics.params { @@ -2753,6 +2850,7 @@ fn insert_late_bound_lifetimes( } } + #[derive(Default)] struct AllCollector { regions: FxHashSet, } diff --git a/src/librustc/middle/stability.rs b/src/librustc/middle/stability.rs index f19aa67a83..543d1053b5 100644 --- a/src/librustc/middle/stability.rs +++ b/src/librustc/middle/stability.rs @@ -134,11 +134,11 @@ impl<'a, 'tcx: 'a> Annotator<'a, 'tcx> { if self.tcx.features().staged_api { // This crate explicitly wants staged API. debug!("annotate(id = {:?}, attrs = {:?})", id, attrs); - if let Some(..) = attr::find_deprecation(self.tcx.sess.diagnostic(), attrs, item_sp) { + if let Some(..) = attr::find_deprecation(&self.tcx.sess.parse_sess, attrs, item_sp) { self.tcx.sess.span_err(item_sp, "`#[deprecated]` cannot be used in staged api, \ use `#[rustc_deprecated]` instead"); } - if let Some(mut stab) = attr::find_stability(self.tcx.sess.diagnostic(), + if let Some(mut stab) = attr::find_stability(&self.tcx.sess.parse_sess, attrs, item_sp) { // Error if prohibited, or can't inherit anything from a container if kind == AnnotationKind::Prohibited || @@ -224,7 +224,7 @@ impl<'a, 'tcx: 'a> Annotator<'a, 'tcx> { } } - if let Some(depr) = attr::find_deprecation(self.tcx.sess.diagnostic(), attrs, item_sp) { + if let Some(depr) = attr::find_deprecation(&self.tcx.sess.parse_sess, attrs, item_sp) { if kind == AnnotationKind::Prohibited { self.tcx.sess.span_err(item_sp, "This deprecation annotation is useless"); } @@ -469,7 +469,7 @@ impl<'a, 'tcx> Index<'tcx> { /// Cross-references the feature names of unstable APIs with enabled /// features and possibly prints errors. pub fn check_unstable_api_usage<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { - let mut checker = Checker { tcx: tcx }; + let mut checker = Checker { tcx }; tcx.hir.krate().visit_all_item_likes(&mut checker.as_deep_visitor()); } diff --git a/src/librustc/mir/interpret/allocation.rs b/src/librustc/mir/interpret/allocation.rs new file mode 100644 index 0000000000..ab3bc4cdf9 --- /dev/null +++ b/src/librustc/mir/interpret/allocation.rs @@ -0,0 +1,719 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! 
The virtual memory representation of the MIR interpreter + +use super::{ + Pointer, EvalResult, AllocId, ScalarMaybeUndef, write_target_uint, read_target_uint, Scalar, + truncate, +}; + +use ty::layout::{Size, Align}; +use syntax::ast::Mutability; +use std::iter; +use mir; +use std::ops::{Deref, DerefMut}; +use rustc_data_structures::sorted_map::SortedMap; +use rustc_target::abi::HasDataLayout; + +/// Used by `check_bounds` to indicate whether the pointer needs to be just inbounds +/// or also inbounds of a *live* allocation. +#[derive(Debug, Copy, Clone, RustcEncodable, RustcDecodable)] +pub enum InboundsCheck { + Live, + MaybeDead, +} + +#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)] +pub struct Allocation { + /// The actual bytes of the allocation. + /// Note that the bytes of a pointer represent the offset of the pointer + pub bytes: Vec, + /// Maps from byte addresses to extra data for each pointer. + /// Only the first byte of a pointer is inserted into the map; i.e., + /// every entry in this map applies to `pointer_size` consecutive bytes starting + /// at the given offset. + pub relocations: Relocations, + /// Denotes undefined memory. Reading from undefined memory is forbidden in miri + pub undef_mask: UndefMask, + /// The alignment of the allocation to detect unaligned reads. + pub align: Align, + /// Whether the allocation is mutable. + /// Also used by codegen to determine if a static should be put into mutable memory, + /// which happens for `static mut` and `static` with interior mutability. + pub mutability: Mutability, + /// Extra state for the machine. + pub extra: Extra, +} + + +pub trait AllocationExtra: ::std::fmt::Debug + Clone { + /// Hook to initialize the extra data when an allocation gets created. + fn memory_allocated( + _size: Size, + _memory_extra: &MemoryExtra + ) -> Self; + + /// Hook for performing extra checks on a memory read access. + /// + /// Takes read-only access to the allocation so we can keep all the memory read + /// operations take `&self`. Use a `RefCell` in `AllocExtra` if you + /// need to mutate. + #[inline(always)] + fn memory_read( + _alloc: &Allocation, + _ptr: Pointer, + _size: Size, + ) -> EvalResult<'tcx> { + Ok(()) + } + + /// Hook for performing extra checks on a memory write access. + #[inline(always)] + fn memory_written( + _alloc: &mut Allocation, + _ptr: Pointer, + _size: Size, + ) -> EvalResult<'tcx> { + Ok(()) + } + + /// Hook for performing extra checks on a memory deallocation. + /// `size` will be the size of the allocation. 
+ #[inline(always)] + fn memory_deallocated( + _alloc: &mut Allocation, + _ptr: Pointer, + _size: Size, + ) -> EvalResult<'tcx> { + Ok(()) + } +} + +impl AllocationExtra<(), ()> for () { + #[inline(always)] + fn memory_allocated( + _size: Size, + _memory_extra: &() + ) -> Self { + () + } +} + +impl Allocation { + /// Creates a read-only allocation initialized by the given bytes + pub fn from_bytes(slice: &[u8], align: Align, extra: Extra) -> Self { + let mut undef_mask = UndefMask::new(Size::ZERO); + undef_mask.grow(Size::from_bytes(slice.len() as u64), true); + Self { + bytes: slice.to_owned(), + relocations: Relocations::new(), + undef_mask, + align, + mutability: Mutability::Immutable, + extra, + } + } + + pub fn from_byte_aligned_bytes(slice: &[u8], extra: Extra) -> Self { + Allocation::from_bytes(slice, Align::from_bytes(1).unwrap(), extra) + } + + pub fn undef(size: Size, align: Align, extra: Extra) -> Self { + assert_eq!(size.bytes() as usize as u64, size.bytes()); + Allocation { + bytes: vec![0; size.bytes() as usize], + relocations: Relocations::new(), + undef_mask: UndefMask::new(size), + align, + mutability: Mutability::Mutable, + extra, + } + } +} + +impl<'tcx> ::serialize::UseSpecializedDecodable for &'tcx Allocation {} + +/// Alignment and bounds checks +impl<'tcx, Tag, Extra> Allocation { + /// Check if the pointer is "in-bounds". Notice that a pointer pointing at the end + /// of an allocation (i.e., at the first *inaccessible* location) *is* considered + /// in-bounds! This follows C's/LLVM's rules. + /// If you want to check bounds before doing a memory access, better use `check_bounds`. + pub fn check_bounds_ptr( + &self, + ptr: Pointer, + ) -> EvalResult<'tcx> { + let allocation_size = self.bytes.len() as u64; + ptr.check_in_alloc(Size::from_bytes(allocation_size), InboundsCheck::Live) + } + + /// Check if the memory range beginning at `ptr` and of size `Size` is "in-bounds". + #[inline(always)] + pub fn check_bounds( + &self, + cx: &impl HasDataLayout, + ptr: Pointer, + size: Size, + ) -> EvalResult<'tcx> { + // if ptr.offset is in bounds, then so is ptr (because offset checks for overflow) + self.check_bounds_ptr(ptr.offset(size, cx)?) + } +} + +/// Byte accessors +impl<'tcx, Tag: Copy, Extra> Allocation { + /// The last argument controls whether we error out when there are undefined + /// or pointer bytes. You should never call this, call `get_bytes` or + /// `get_bytes_with_undef_and_ptr` instead, + /// + /// This function also guarantees that the resulting pointer will remain stable + /// even when new allocations are pushed to the `HashMap`. `copy_repeatedly` relies + /// on that. 
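
A hedged sketch of how the constructors above might be called from compiler-internal code. It assumes the `Tag`/`Extra` type parameters of `Allocation` (elided by the formatting of this patch) default to `()`, as suggested by the `AllocationExtra<(), ()> for ()` impl; `Size` and `Align` are the `ty::layout` types imported by this module, so this is not a standalone program:

```rust
fn demo_allocations() -> (Allocation<(), ()>, Allocation<(), ()>) {
    // A read-only, byte-aligned allocation holding the bytes of a literal,
    // with no tag and no machine-specific extra state.
    let hello = Allocation::from_byte_aligned_bytes(b"hello", ());

    // A mutable, fully-undefined 16-byte allocation with 8-byte alignment.
    let scratch = Allocation::undef(
        Size::from_bytes(16),
        Align::from_bytes(8).unwrap(),
        (),
    );

    (hello, scratch)
}
```
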
+ fn get_bytes_internal( + &self, + cx: &impl HasDataLayout, + ptr: Pointer, + size: Size, + check_defined_and_ptr: bool, + ) -> EvalResult<'tcx, &[u8]> + // FIXME: Working around https://github.com/rust-lang/rust/issues/56209 + where Extra: AllocationExtra + { + self.check_bounds(cx, ptr, size)?; + + if check_defined_and_ptr { + self.check_defined(ptr, size)?; + self.check_relocations(cx, ptr, size)?; + } else { + // We still don't want relocations on the *edges* + self.check_relocation_edges(cx, ptr, size)?; + } + + AllocationExtra::memory_read(self, ptr, size)?; + + assert_eq!(ptr.offset.bytes() as usize as u64, ptr.offset.bytes()); + assert_eq!(size.bytes() as usize as u64, size.bytes()); + let offset = ptr.offset.bytes() as usize; + Ok(&self.bytes[offset..offset + size.bytes() as usize]) + } + + #[inline] + pub fn get_bytes( + &self, + cx: &impl HasDataLayout, + ptr: Pointer, + size: Size, + ) -> EvalResult<'tcx, &[u8]> + // FIXME: Working around https://github.com/rust-lang/rust/issues/56209 + where Extra: AllocationExtra + { + self.get_bytes_internal(cx, ptr, size, true) + } + + /// It is the caller's responsibility to handle undefined and pointer bytes. + /// However, this still checks that there are no relocations on the *edges*. + #[inline] + pub fn get_bytes_with_undef_and_ptr( + &self, + cx: &impl HasDataLayout, + ptr: Pointer, + size: Size, + ) -> EvalResult<'tcx, &[u8]> + // FIXME: Working around https://github.com/rust-lang/rust/issues/56209 + where Extra: AllocationExtra + { + self.get_bytes_internal(cx, ptr, size, false) + } + + /// Just calling this already marks everything as defined and removes relocations, + /// so be sure to actually put data there! + pub fn get_bytes_mut( + &mut self, + cx: &impl HasDataLayout, + ptr: Pointer, + size: Size, + ) -> EvalResult<'tcx, &mut [u8]> + // FIXME: Working around https://github.com/rust-lang/rust/issues/56209 + where Extra: AllocationExtra + { + assert_ne!(size.bytes(), 0, "0-sized accesses should never even get a `Pointer`"); + self.check_bounds(cx, ptr, size)?; + + self.mark_definedness(ptr, size, true)?; + self.clear_relocations(cx, ptr, size)?; + + AllocationExtra::memory_written(self, ptr, size)?; + + assert_eq!(ptr.offset.bytes() as usize as u64, ptr.offset.bytes()); + assert_eq!(size.bytes() as usize as u64, size.bytes()); + let offset = ptr.offset.bytes() as usize; + Ok(&mut self.bytes[offset..offset + size.bytes() as usize]) + } +} + +/// Reading and writing +impl<'tcx, Tag: Copy, Extra> Allocation { + /// Reads bytes until a `0` is encountered. Will error if the end of the allocation is reached + /// before a `0` is found. + pub fn read_c_str( + &self, + cx: &impl HasDataLayout, + ptr: Pointer, + ) -> EvalResult<'tcx, &[u8]> + // FIXME: Working around https://github.com/rust-lang/rust/issues/56209 + where Extra: AllocationExtra + { + assert_eq!(ptr.offset.bytes() as usize as u64, ptr.offset.bytes()); + let offset = ptr.offset.bytes() as usize; + match self.bytes[offset..].iter().position(|&c| c == 0) { + Some(size) => { + let size_with_null = Size::from_bytes((size + 1) as u64); + // Go through `get_bytes` for checks and AllocationExtra hooks. + // We read the null, so we include it in the request, but we want it removed + // from the result! + Ok(&self.get_bytes(cx, ptr, size_with_null)?[..size]) + } + None => err!(UnterminatedCString(ptr.erase_tag())), + } + } + + /// Validates that `ptr.offset` and `ptr.offset + size` do not point to the middle of a + /// relocation. 
If `allow_ptr_and_undef` is `false`, also enforces that the memory in the + /// given range contains neither relocations nor undef bytes. + pub fn check_bytes( + &self, + cx: &impl HasDataLayout, + ptr: Pointer, + size: Size, + allow_ptr_and_undef: bool, + ) -> EvalResult<'tcx> + // FIXME: Working around https://github.com/rust-lang/rust/issues/56209 + where Extra: AllocationExtra + { + // Check bounds and relocations on the edges + self.get_bytes_with_undef_and_ptr(cx, ptr, size)?; + // Check undef and ptr + if !allow_ptr_and_undef { + self.check_defined(ptr, size)?; + self.check_relocations(cx, ptr, size)?; + } + Ok(()) + } + + /// Writes `src` to the memory starting at `ptr.offset`. + /// + /// Will do bounds checks on the allocation. + pub fn write_bytes( + &mut self, + cx: &impl HasDataLayout, + ptr: Pointer, + src: &[u8], + ) -> EvalResult<'tcx> + // FIXME: Working around https://github.com/rust-lang/rust/issues/56209 + where Extra: AllocationExtra + { + let bytes = self.get_bytes_mut(cx, ptr, Size::from_bytes(src.len() as u64))?; + bytes.clone_from_slice(src); + Ok(()) + } + + /// Sets `count` bytes starting at `ptr.offset` with `val`. Basically `memset`. + pub fn write_repeat( + &mut self, + cx: &impl HasDataLayout, + ptr: Pointer, + val: u8, + count: Size + ) -> EvalResult<'tcx> + // FIXME: Working around https://github.com/rust-lang/rust/issues/56209 + where Extra: AllocationExtra + { + let bytes = self.get_bytes_mut(cx, ptr, count)?; + for b in bytes { + *b = val; + } + Ok(()) + } + + /// Read a *non-ZST* scalar + /// + /// zsts can't be read out of two reasons: + /// * byteorder cannot work with zero element buffers + /// * in oder to obtain a `Pointer` we need to check for ZSTness anyway due to integer pointers + /// being valid for ZSTs + /// + /// Note: This function does not do *any* alignment checks, you need to do these before calling + pub fn read_scalar( + &self, + cx: &impl HasDataLayout, + ptr: Pointer, + size: Size + ) -> EvalResult<'tcx, ScalarMaybeUndef> + // FIXME: Working around https://github.com/rust-lang/rust/issues/56209 + where Extra: AllocationExtra + { + // get_bytes_unchecked tests relocation edges + let bytes = self.get_bytes_with_undef_and_ptr(cx, ptr, size)?; + // Undef check happens *after* we established that the alignment is correct. + // We must not return Ok() for unaligned pointers! + if self.check_defined(ptr, size).is_err() { + // this inflates undefined bytes to the entire scalar, even if only a few + // bytes are undefined + return Ok(ScalarMaybeUndef::Undef); + } + // Now we do the actual reading + let bits = read_target_uint(cx.data_layout().endian, bytes).unwrap(); + // See if we got a pointer + if size != cx.data_layout().pointer_size { + // *Now* better make sure that the inside also is free of relocations. + self.check_relocations(cx, ptr, size)?; + } else { + match self.relocations.get(&ptr.offset) { + Some(&(tag, alloc_id)) => { + let ptr = Pointer::new_with_tag(alloc_id, Size::from_bytes(bits as u64), tag); + return Ok(ScalarMaybeUndef::Scalar(ptr.into())) + } + None => {}, + } + } + // We don't. Just return the bits. 
+ Ok(ScalarMaybeUndef::Scalar(Scalar::from_uint(bits, size))) + } + + /// Note: This function does not do *any* alignment checks, you need to do these before calling + pub fn read_ptr_sized( + &self, + cx: &impl HasDataLayout, + ptr: Pointer, + ) -> EvalResult<'tcx, ScalarMaybeUndef> + // FIXME: Working around https://github.com/rust-lang/rust/issues/56209 + where Extra: AllocationExtra + { + self.read_scalar(cx, ptr, cx.data_layout().pointer_size) + } + + /// Write a *non-ZST* scalar + /// + /// zsts can't be read out of two reasons: + /// * byteorder cannot work with zero element buffers + /// * in oder to obtain a `Pointer` we need to check for ZSTness anyway due to integer pointers + /// being valid for ZSTs + /// + /// Note: This function does not do *any* alignment checks, you need to do these before calling + pub fn write_scalar( + &mut self, + cx: &impl HasDataLayout, + ptr: Pointer, + val: ScalarMaybeUndef, + type_size: Size, + ) -> EvalResult<'tcx> + // FIXME: Working around https://github.com/rust-lang/rust/issues/56209 + where Extra: AllocationExtra + { + let val = match val { + ScalarMaybeUndef::Scalar(scalar) => scalar, + ScalarMaybeUndef::Undef => return self.mark_definedness(ptr, type_size, false), + }; + + let bytes = match val { + Scalar::Ptr(val) => { + assert_eq!(type_size, cx.data_layout().pointer_size); + val.offset.bytes() as u128 + } + + Scalar::Bits { bits, size } => { + assert_eq!(size as u64, type_size.bytes()); + debug_assert_eq!(truncate(bits, Size::from_bytes(size.into())), bits, + "Unexpected value of size {} when writing to memory", size); + bits + }, + }; + + let endian = cx.data_layout().endian; + let dst = self.get_bytes_mut(cx, ptr, type_size)?; + write_target_uint(endian, dst, bytes).unwrap(); + + // See if we have to also write a relocation + match val { + Scalar::Ptr(val) => { + self.relocations.insert( + ptr.offset, + (val.tag, val.alloc_id), + ); + } + _ => {} + } + + Ok(()) + } + + /// Note: This function does not do *any* alignment checks, you need to do these before calling + pub fn write_ptr_sized( + &mut self, + cx: &impl HasDataLayout, + ptr: Pointer, + val: ScalarMaybeUndef + ) -> EvalResult<'tcx> + // FIXME: Working around https://github.com/rust-lang/rust/issues/56209 + where Extra: AllocationExtra + { + let ptr_size = cx.data_layout().pointer_size; + self.write_scalar(cx, ptr.into(), val, ptr_size) + } +} + +/// Relocations +impl<'tcx, Tag: Copy, Extra> Allocation { + /// Return all relocations overlapping with the given ptr-offset pair. + pub fn relocations( + &self, + cx: &impl HasDataLayout, + ptr: Pointer, + size: Size, + ) -> &[(Size, (Tag, AllocId))] { + // We have to go back `pointer_size - 1` bytes, as that one would still overlap with + // the beginning of this range. + let start = ptr.offset.bytes().saturating_sub(cx.data_layout().pointer_size.bytes() - 1); + let end = ptr.offset + size; // this does overflow checking + self.relocations.range(Size::from_bytes(start)..end) + } + + /// Check that there are no relocations overlapping with the given range. + #[inline(always)] + fn check_relocations( + &self, + cx: &impl HasDataLayout, + ptr: Pointer, + size: Size, + ) -> EvalResult<'tcx> { + if self.relocations(cx, ptr, size).is_empty() { + Ok(()) + } else { + err!(ReadPointerAsBytes) + } + } + + /// Remove all relocations inside the given range. + /// If there are relocations overlapping with the edges, they + /// are removed as well *and* the bytes they cover are marked as + /// uninitialized. 
This is a somewhat odd "spooky action at a distance", + /// but it allows strictly more code to run than if we would just error + /// immediately in that case. + fn clear_relocations( + &mut self, + cx: &impl HasDataLayout, + ptr: Pointer, + size: Size, + ) -> EvalResult<'tcx> { + // Find the start and end of the given range and its outermost relocations. + let (first, last) = { + // Find all relocations overlapping the given range. + let relocations = self.relocations(cx, ptr, size); + if relocations.is_empty() { + return Ok(()); + } + + (relocations.first().unwrap().0, + relocations.last().unwrap().0 + cx.data_layout().pointer_size) + }; + let start = ptr.offset; + let end = start + size; + + // Mark parts of the outermost relocations as undefined if they partially fall outside the + // given range. + if first < start { + self.undef_mask.set_range(first, start, false); + } + if last > end { + self.undef_mask.set_range(end, last, false); + } + + // Forget all the relocations. + self.relocations.remove_range(first..last); + + Ok(()) + } + + /// Error if there are relocations overlapping with the edges of the + /// given memory range. + #[inline] + fn check_relocation_edges( + &self, + cx: &impl HasDataLayout, + ptr: Pointer, + size: Size, + ) -> EvalResult<'tcx> { + self.check_relocations(cx, ptr, Size::ZERO)?; + self.check_relocations(cx, ptr.offset(size, cx)?, Size::ZERO)?; + Ok(()) + } +} + + +/// Undefined bytes +impl<'tcx, Tag, Extra> Allocation { + /// Checks that a range of bytes is defined. If not, returns the `ReadUndefBytes` + /// error which will report the first byte which is undefined. + #[inline] + fn check_defined(&self, ptr: Pointer, size: Size) -> EvalResult<'tcx> { + self.undef_mask.is_range_defined( + ptr.offset, + ptr.offset + size, + ).or_else(|idx| err!(ReadUndefBytes(idx))) + } + + pub fn mark_definedness( + &mut self, + ptr: Pointer, + size: Size, + new_state: bool, + ) -> EvalResult<'tcx> { + if size.bytes() == 0 { + return Ok(()); + } + self.undef_mask.set_range( + ptr.offset, + ptr.offset + size, + new_state, + ); + Ok(()) + } +} + +/// Relocations +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)] +pub struct Relocations(SortedMap); + +impl Relocations { + pub fn new() -> Self { + Relocations(SortedMap::new()) + } + + // The caller must guarantee that the given relocations are already sorted + // by address and contain no duplicates. + pub fn from_presorted(r: Vec<(Size, (Tag, Id))>) -> Self { + Relocations(SortedMap::from_presorted_elements(r)) + } +} + +impl Deref for Relocations { + type Target = SortedMap; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for Relocations { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +//////////////////////////////////////////////////////////////////////////////// +// Undefined byte tracking +//////////////////////////////////////////////////////////////////////////////// + +type Block = u64; +const BLOCK_SIZE: u64 = 64; + +#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)] +pub struct UndefMask { + blocks: Vec, + len: Size, +} + +impl_stable_hash_for!(struct mir::interpret::UndefMask{blocks, len}); + +impl UndefMask { + pub fn new(size: Size) -> Self { + let mut m = UndefMask { + blocks: vec![], + len: Size::ZERO, + }; + m.grow(size, false); + m + } + + /// Check whether the range `start..end` (end-exclusive) is entirely defined. + /// + /// Returns `Ok(())` if it's defined. 
Otherwise returns the index of the byte + /// at which the first undefined access begins. + #[inline] + pub fn is_range_defined(&self, start: Size, end: Size) -> Result<(), Size> { + if end > self.len { + return Err(self.len); + } + + let idx = (start.bytes()..end.bytes()) + .map(|i| Size::from_bytes(i)) + .find(|&i| !self.get(i)); + + match idx { + Some(idx) => Err(idx), + None => Ok(()) + } + } + + pub fn set_range(&mut self, start: Size, end: Size, new_state: bool) { + let len = self.len; + if end > len { + self.grow(end - len, new_state); + } + self.set_range_inbounds(start, end, new_state); + } + + pub fn set_range_inbounds(&mut self, start: Size, end: Size, new_state: bool) { + for i in start.bytes()..end.bytes() { + self.set(Size::from_bytes(i), new_state); + } + } + + #[inline] + pub fn get(&self, i: Size) -> bool { + let (block, bit) = bit_index(i); + (self.blocks[block] & 1 << bit) != 0 + } + + #[inline] + pub fn set(&mut self, i: Size, new_state: bool) { + let (block, bit) = bit_index(i); + if new_state { + self.blocks[block] |= 1 << bit; + } else { + self.blocks[block] &= !(1 << bit); + } + } + + pub fn grow(&mut self, amount: Size, new_state: bool) { + let unused_trailing_bits = self.blocks.len() as u64 * BLOCK_SIZE - self.len.bytes(); + if amount.bytes() > unused_trailing_bits { + let additional_blocks = amount.bytes() / BLOCK_SIZE + 1; + assert_eq!(additional_blocks as usize as u64, additional_blocks); + self.blocks.extend( + iter::repeat(0).take(additional_blocks as usize), + ); + } + let start = self.len; + self.len += amount; + self.set_range_inbounds(start, start + amount, new_state); + } +} + +#[inline] +fn bit_index(bits: Size) -> (usize, usize) { + let bits = bits.bytes(); + let a = bits / BLOCK_SIZE; + let b = bits % BLOCK_SIZE; + assert_eq!(a as usize as u64, a); + assert_eq!(b as usize as u64, b); + (a as usize, b as usize) +} diff --git a/src/librustc/mir/interpret/error.rs b/src/librustc/mir/interpret/error.rs index fe466e247c..503e0abdbf 100644 --- a/src/librustc/mir/interpret/error.rs +++ b/src/librustc/mir/interpret/error.rs @@ -10,47 +10,83 @@ use std::{fmt, env}; +use hir::map::definitions::DefPathData; use mir; -use ty::{Ty, layout}; -use ty::layout::{Size, Align}; -use rustc_data_structures::sync::Lrc; +use ty::{self, Ty, layout}; +use ty::layout::{Size, Align, LayoutError}; use rustc_target::spec::abi::Abi; -use super::{ - Pointer, Lock, AccessKind -}; +use super::{RawConst, Pointer, InboundsCheck, ScalarMaybeUndef}; use backtrace::Backtrace; -use ty; use ty::query::TyCtxtAt; use errors::DiagnosticBuilder; -use syntax_pos::Span; +use syntax_pos::{Pos, Span}; use syntax::ast; use syntax::symbol::Symbol; -pub type ConstEvalResult<'tcx> = Result<&'tcx ty::Const<'tcx>, Lrc>>; +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub enum ErrorHandled { + /// Already reported a lint or an error for this evaluation + Reported, + /// Don't emit an error, the evaluation failed because the MIR was generic + /// and the substs didn't fully monomorphize it. 
+ TooGeneric, +} + +impl ErrorHandled { + pub fn assert_reported(self) { + match self { + ErrorHandled::Reported => {}, + ErrorHandled::TooGeneric => bug!("MIR interpretation failed without reporting an error \ + even though it was fully monomorphized"), + } + } +} + +pub type ConstEvalRawResult<'tcx> = Result, ErrorHandled>; +pub type ConstEvalResult<'tcx> = Result<&'tcx ty::Const<'tcx>, ErrorHandled>; #[derive(Clone, Debug, RustcEncodable, RustcDecodable)] pub struct ConstEvalErr<'tcx> { pub span: Span, - pub error: ::mir::interpret::EvalError<'tcx>, - pub stacktrace: Vec, + pub error: ::mir::interpret::EvalErrorKind<'tcx, u64>, + pub stacktrace: Vec>, } #[derive(Clone, Debug, RustcEncodable, RustcDecodable)] -pub struct FrameInfo { - pub span: Span, - pub location: String, +pub struct FrameInfo<'tcx> { + pub call_site: Span, // this span is in the caller! + pub instance: ty::Instance<'tcx>, pub lint_root: Option, } +impl<'tcx> fmt::Display for FrameInfo<'tcx> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + ty::tls::with(|tcx| { + if tcx.def_key(self.instance.def_id()).disambiguated_data.data + == DefPathData::ClosureExpr + { + write!(f, "inside call to closure")?; + } else { + write!(f, "inside call to `{}`", self.instance)?; + } + if !self.call_site.is_dummy() { + let lo = tcx.sess.source_map().lookup_char_pos_adj(self.call_site.lo()); + write!(f, " at {}:{}:{}", lo.filename, lo.line, lo.col.to_usize() + 1)?; + } + Ok(()) + }) + } +} + impl<'a, 'gcx, 'tcx> ConstEvalErr<'tcx> { pub fn struct_error(&self, tcx: TyCtxtAt<'a, 'gcx, 'tcx>, message: &str) - -> Option> + -> Result, ErrorHandled> { self.struct_generic(tcx, message, None) } @@ -58,10 +94,14 @@ impl<'a, 'gcx, 'tcx> ConstEvalErr<'tcx> { pub fn report_as_error(&self, tcx: TyCtxtAt<'a, 'gcx, 'tcx>, message: &str - ) { + ) -> ErrorHandled { let err = self.struct_error(tcx, message); - if let Some(mut err) = err { - err.emit(); + match err { + Ok(mut err) => { + err.emit(); + ErrorHandled::Reported + }, + Err(err) => err, } } @@ -69,14 +109,18 @@ impl<'a, 'gcx, 'tcx> ConstEvalErr<'tcx> { tcx: TyCtxtAt<'a, 'gcx, 'tcx>, message: &str, lint_root: ast::NodeId, - ) { + ) -> ErrorHandled { let lint = self.struct_generic( tcx, message, Some(lint_root), ); - if let Some(mut lint) = lint { - lint.emit(); + match lint { + Ok(mut lint) => { + lint.emit(); + ErrorHandled::Reported + }, + Err(err) => err, } } @@ -85,15 +129,12 @@ impl<'a, 'gcx, 'tcx> ConstEvalErr<'tcx> { tcx: TyCtxtAt<'a, 'gcx, 'tcx>, message: &str, lint_root: Option, - ) -> Option> { - match self.error.kind { - ::mir::interpret::EvalErrorKind::TypeckError | - ::mir::interpret::EvalErrorKind::TooGeneric | - ::mir::interpret::EvalErrorKind::CheckMatchError | - ::mir::interpret::EvalErrorKind::Layout(_) => return None, - ::mir::interpret::EvalErrorKind::ReferencedConstant(ref inner) => { - inner.struct_generic(tcx, "referenced constant has errors", lint_root)?.emit(); - }, + ) -> Result, ErrorHandled> { + match self.error { + EvalErrorKind::Layout(LayoutError::Unknown(_)) | + EvalErrorKind::TooGeneric => return Err(ErrorHandled::TooGeneric), + EvalErrorKind::Layout(LayoutError::SizeOverflow(_)) | + EvalErrorKind::TypeckError => return Err(ErrorHandled::Reported), _ => {}, } trace!("reporting const eval failure at {:?}", self.span); @@ -114,10 +155,15 @@ impl<'a, 'gcx, 'tcx> ConstEvalErr<'tcx> { struct_error(tcx, message) }; err.span_label(self.span, self.error.to_string()); - for FrameInfo { span, location, .. 
} in &self.stacktrace { - err.span_label(*span, format!("inside call to `{}`", location)); + // Skip the last, which is just the environment of the constant. The stacktrace + // is sometimes empty because we create "fake" eval contexts in CTFE to do work + // on constant values. + if self.stacktrace.len() > 0 { + for frame_info in &self.stacktrace[..self.stacktrace.len()-1] { + err.span_label(frame_info.call_site, frame_info.to_string()); + } } - Some(err) + Ok(err) } } @@ -128,50 +174,81 @@ pub fn struct_error<'a, 'gcx, 'tcx>( struct_span_err!(tcx.sess, tcx.span, E0080, "{}", msg) } -#[derive(Debug, Clone, RustcEncodable, RustcDecodable)] +#[derive(Debug, Clone)] pub struct EvalError<'tcx> { pub kind: EvalErrorKind<'tcx, u64>, + pub backtrace: Option>, +} + +impl<'tcx> EvalError<'tcx> { + pub fn print_backtrace(&mut self) { + if let Some(ref mut backtrace) = self.backtrace { + eprintln!("{}", print_backtrace(&mut *backtrace)); + } + } +} + +fn print_backtrace(backtrace: &mut Backtrace) -> String { + use std::fmt::Write; + + backtrace.resolve(); + + let mut trace_text = "\n\nAn error occurred in miri:\n".to_string(); + write!(trace_text, "backtrace frames: {}\n", backtrace.frames().len()).unwrap(); + 'frames: for (i, frame) in backtrace.frames().iter().enumerate() { + if frame.symbols().is_empty() { + write!(trace_text, " {}: no symbols\n", i).unwrap(); + } + let mut first = true; + for symbol in frame.symbols() { + if first { + write!(trace_text, " {}: ", i).unwrap(); + first = false; + } else { + let len = i.to_string().len(); + write!(trace_text, " {} ", " ".repeat(len)).unwrap(); + } + if let Some(name) = symbol.name() { + write!(trace_text, "{}\n", name).unwrap(); + } else { + write!(trace_text, "\n").unwrap(); + } + write!(trace_text, " at ").unwrap(); + if let Some(file_path) = symbol.filename() { + write!(trace_text, "{}", file_path.display()).unwrap(); + } else { + write!(trace_text, "").unwrap(); + } + if let Some(line) = symbol.lineno() { + write!(trace_text, ":{}\n", line).unwrap(); + } else { + write!(trace_text, "\n").unwrap(); + } + } + } + trace_text } impl<'tcx> From> for EvalError<'tcx> { fn from(kind: EvalErrorKind<'tcx, u64>) -> Self { - match env::var("MIRI_BACKTRACE") { - Ok(ref val) if !val.is_empty() => { - let backtrace = Backtrace::new(); + let backtrace = match env::var("RUST_CTFE_BACKTRACE") { + // matching RUST_BACKTRACE, we treat "0" the same as "not present". 
+ Ok(ref val) if val != "0" => { + let mut backtrace = Backtrace::new_unresolved(); - use std::fmt::Write; - let mut trace_text = "\n\nAn error occurred in miri:\n".to_string(); - write!(trace_text, "backtrace frames: {}\n", backtrace.frames().len()).unwrap(); - 'frames: for (i, frame) in backtrace.frames().iter().enumerate() { - if frame.symbols().is_empty() { - write!(trace_text, "{}: no symbols\n", i).unwrap(); - } - for symbol in frame.symbols() { - write!(trace_text, "{}: ", i).unwrap(); - if let Some(name) = symbol.name() { - write!(trace_text, "{}\n", name).unwrap(); - } else { - write!(trace_text, "\n").unwrap(); - } - write!(trace_text, "\tat ").unwrap(); - if let Some(file_path) = symbol.filename() { - write!(trace_text, "{}", file_path.display()).unwrap(); - } else { - write!(trace_text, "").unwrap(); - } - if let Some(line) = symbol.lineno() { - write!(trace_text, ":{}\n", line).unwrap(); - } else { - write!(trace_text, "\n").unwrap(); - } - } + if val == "immediate" { + // Print it now + eprintln!("{}", print_backtrace(&mut backtrace)); + None + } else { + Some(Box::new(backtrace)) } - error!("{}", trace_text); }, - _ => {}, - } + _ => None, + }; EvalError { kind, + backtrace, } } } @@ -195,10 +272,10 @@ pub enum EvalErrorKind<'tcx, O> { InvalidMemoryAccess, InvalidFunctionPointer, InvalidBool, - InvalidDiscriminant(u128), + InvalidDiscriminant(ScalarMaybeUndef), PointerOutOfBounds { ptr: Pointer, - access: bool, + check: InboundsCheck, allocation_size: Size, }, InvalidNullPointerUsage, @@ -227,33 +304,11 @@ pub enum EvalErrorKind<'tcx, O> { required: Align, has: Align, }, - MemoryLockViolation { - ptr: Pointer, - len: u64, - frame: usize, - access: AccessKind, - lock: Lock, - }, - MemoryAcquireConflict { - ptr: Pointer, - len: u64, - kind: AccessKind, - lock: Lock, - }, - InvalidMemoryLockRelease { - ptr: Pointer, - len: u64, - frame: usize, - lock: Lock, - }, - DeallocatedLockedMemory { - ptr: Pointer, - lock: Lock, - }, ValidationFailure(String), CalledClosureAsFunction, VtableForArgumentlessMethod, ModifiedConstantMemory, + ModifiedStatic, AssumptionNotHeld, InlineAsm, TypeNotPrimitive(Ty<'tcx>), @@ -279,10 +334,9 @@ pub enum EvalErrorKind<'tcx, O> { TypeckError, /// Resolution can fail if we are in a too generic context TooGeneric, - CheckMatchError, /// Cannot compute this constant because it depends on another one /// which already produced an error - ReferencedConstant(Lrc>), + ReferencedConstant, GeneratorResumedAfterReturn, GeneratorResumedAfterPanic, InfiniteLoop, @@ -314,16 +368,8 @@ impl<'tcx, O> EvalErrorKind<'tcx, O> { "pointer offset outside bounds of allocation", InvalidNullPointerUsage => "invalid use of NULL pointer", - MemoryLockViolation { .. } => - "memory access conflicts with lock", - MemoryAcquireConflict { .. } => - "new memory lock conflicts with existing lock", ValidationFailure(..) => "type validation failed", - InvalidMemoryLockRelease { .. } => - "invalid attempt to release write lock", - DeallocatedLockedMemory { .. 
} => - "tried to deallocate memory in conflict with a lock", ReadPointerAsBytes => "a raw memory access tried to access part of a pointer value as raw bytes", ReadBytesAsPointer => @@ -367,6 +413,8 @@ impl<'tcx, O> EvalErrorKind<'tcx, O> { "tried to call a vtable function without arguments", ModifiedConstantMemory => "tried to modify constant memory", + ModifiedStatic => + "tried to modify a static's initial value from another static's initializer", AssumptionNotHeld => "`assume` argument was false", InlineAsm => @@ -407,9 +455,7 @@ impl<'tcx, O> EvalErrorKind<'tcx, O> { "encountered constants with type errors, stopping evaluation", TooGeneric => "encountered overly generic constant", - CheckMatchError => - "match checking failed", - ReferencedConstant(_) => + ReferencedConstant => "referenced constant has errors", Overflow(mir::BinOp::Add) => "attempt to add with overflow", Overflow(mir::BinOp::Sub) => "attempt to subtract with overflow", @@ -432,7 +478,13 @@ impl<'tcx, O> EvalErrorKind<'tcx, O> { impl<'tcx> fmt::Display for EvalError<'tcx> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{:?}", self.kind) + write!(f, "{}", self.kind) + } +} + +impl<'tcx> fmt::Display for EvalErrorKind<'tcx, u64> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?}", self) } } @@ -440,27 +492,15 @@ impl<'tcx, O: fmt::Debug> fmt::Debug for EvalErrorKind<'tcx, O> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { use self::EvalErrorKind::*; match *self { - PointerOutOfBounds { ptr, access, allocation_size } => { - write!(f, "{} at offset {}, outside bounds of allocation {} which has size {}", - if access { "memory access" } else { "pointer computed" }, + PointerOutOfBounds { ptr, check, allocation_size } => { + write!(f, "Pointer must be in-bounds{} at offset {}, but is outside bounds of \ + allocation {} which has size {}", + match check { + InboundsCheck::Live => " and live", + InboundsCheck::MaybeDead => "", + }, ptr.offset.bytes(), ptr.alloc_id, allocation_size.bytes()) }, - MemoryLockViolation { ptr, len, frame, access, ref lock } => { - write!(f, "{:?} access by frame {} at {:?}, size {}, is in conflict with lock {:?}", - access, frame, ptr, len, lock) - } - MemoryAcquireConflict { ptr, len, kind, ref lock } => { - write!(f, "new {:?} lock at {:?}, size {}, is in conflict with lock {:?}", - kind, ptr, len, lock) - } - InvalidMemoryLockRelease { ptr, len, frame, ref lock } => { - write!(f, "frame {} tried to release memory write lock at {:?}, size {}, but \ - cannot release lock {:?}", frame, ptr, len, lock) - } - DeallocatedLockedMemory { ptr, ref lock } => { - write!(f, "tried to deallocate memory at {:?} in conflict with lock {:?}", - ptr, lock) - } ValidationFailure(ref err) => { write!(f, "type validation failed: {}", err) } @@ -490,7 +530,7 @@ impl<'tcx, O: fmt::Debug> fmt::Debug for EvalErrorKind<'tcx, O> { write!(f, "tried to interpret an invalid 32-bit value as a char: {}", c), AlignmentCheckFailed { required, has } => write!(f, "tried to access memory with alignment {}, but alignment {} is required", - has.abi(), required.abi()), + has.bytes(), required.bytes()), TypeNotPrimitive(ty) => write!(f, "expected primitive type, got {}", ty), Layout(ref err) => @@ -500,8 +540,9 @@ impl<'tcx, O: fmt::Debug> fmt::Debug for EvalErrorKind<'tcx, O> { MachineError(ref inner) => write!(f, "{}", inner), IncorrectAllocationInformation(size, size2, align, align2) => - write!(f, "incorrect alloc info: expected size {} and align {}, got size {} and 
\ - align {}", size.bytes(), align.abi(), size2.bytes(), align2.abi()), + write!(f, "incorrect alloc info: expected size {} and align {}, \ + got size {} and align {}", + size.bytes(), align.bytes(), size2.bytes(), align2.bytes()), Panic { ref msg, line, col, ref file } => write!(f, "the evaluated program panicked at '{}', {}:{}:{}", msg, file, line, col), InvalidDiscriminant(val) => diff --git a/src/librustc/mir/interpret/mod.rs b/src/librustc/mir/interpret/mod.rs index 5054f52277..9369b6e56f 100644 --- a/src/librustc/mir/interpret/mod.rs +++ b/src/librustc/mir/interpret/mod.rs @@ -17,27 +17,32 @@ macro_rules! err { mod error; mod value; +mod allocation; +mod pointer; pub use self::error::{ EvalError, EvalResult, EvalErrorKind, AssertMessage, ConstEvalErr, struct_error, - FrameInfo, ConstEvalResult, + FrameInfo, ConstEvalRawResult, ConstEvalResult, ErrorHandled, }; -pub use self::value::{Scalar, ConstValue}; +pub use self::value::{Scalar, ScalarMaybeUndef, RawConst, ConstValue}; + +pub use self::allocation::{ + InboundsCheck, Allocation, AllocationExtra, + Relocations, UndefMask, +}; + +pub use self::pointer::{Pointer, PointerArithmetic}; use std::fmt; use mir; use hir::def_id::DefId; use ty::{self, TyCtxt, Instance}; -use ty::layout::{self, Align, HasDataLayout, Size}; +use ty::layout::{self, Size}; use middle::region; -use std::iter; use std::io; -use std::ops::{Deref, DerefMut}; use std::hash::Hash; -use syntax::ast::Mutability; use rustc_serialize::{Encoder, Decodable, Encodable}; -use rustc_data_structures::sorted_map::SortedMap; use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::sync::{Lock as Mutex, HashMapExt}; use rustc_data_structures::tiny_list::TinyList; @@ -78,145 +83,6 @@ pub struct GlobalId<'tcx> { pub promoted: Option, } -//////////////////////////////////////////////////////////////////////////////// -// Pointer arithmetic -//////////////////////////////////////////////////////////////////////////////// - -pub trait PointerArithmetic: layout::HasDataLayout { - // These are not supposed to be overridden. - - #[inline(always)] - fn pointer_size(self) -> Size { - self.data_layout().pointer_size - } - - //// Trunace the given value to the pointer size; also return whether there was an overflow - fn truncate_to_ptr(self, val: u128) -> (u64, bool) { - let max_ptr_plus_1 = 1u128 << self.pointer_size().bits(); - ((val % max_ptr_plus_1) as u64, val >= max_ptr_plus_1) - } - - // Overflow checking only works properly on the range from -u64 to +u64. - fn overflowing_signed_offset(self, val: u64, i: i128) -> (u64, bool) { - // FIXME: is it possible to over/underflow here? - if i < 0 { - // trickery to ensure that i64::min_value() works fine - // this formula only works for true negative values, it panics for zero! 
- let n = u64::max_value() - (i as u64) + 1; - val.overflowing_sub(n) - } else { - self.overflowing_offset(val, i as u64) - } - } - - fn overflowing_offset(self, val: u64, i: u64) -> (u64, bool) { - let (res, over1) = val.overflowing_add(i); - let (res, over2) = self.truncate_to_ptr(res as u128); - (res, over1 || over2) - } - - fn signed_offset<'tcx>(self, val: u64, i: i64) -> EvalResult<'tcx, u64> { - let (res, over) = self.overflowing_signed_offset(val, i as i128); - if over { err!(Overflow(mir::BinOp::Add)) } else { Ok(res) } - } - - fn offset<'tcx>(self, val: u64, i: u64) -> EvalResult<'tcx, u64> { - let (res, over) = self.overflowing_offset(val, i); - if over { err!(Overflow(mir::BinOp::Add)) } else { Ok(res) } - } - - fn wrapping_signed_offset(self, val: u64, i: i64) -> u64 { - self.overflowing_signed_offset(val, i as i128).0 - } -} - -impl PointerArithmetic for T {} - - -/// Pointer is generic over the type that represents a reference to Allocations, -/// thus making it possible for the most convenient representation to be used in -/// each context. -/// -/// Defaults to the index based and loosely coupled AllocId. -/// -/// Pointer is also generic over the `Tag` associated with each pointer, -/// which is used to do provenance tracking during execution. -#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, RustcEncodable, RustcDecodable, Hash)] -pub struct Pointer { - pub alloc_id: Id, - pub offset: Size, - pub tag: Tag, -} - -/// Produces a `Pointer` which points to the beginning of the Allocation -impl From for Pointer { - #[inline(always)] - fn from(alloc_id: AllocId) -> Self { - Pointer::new(alloc_id, Size::ZERO) - } -} - -impl<'tcx> Pointer<()> { - #[inline(always)] - pub fn new(alloc_id: AllocId, offset: Size) -> Self { - Pointer { alloc_id, offset, tag: () } - } - - #[inline(always)] - pub fn with_default_tag(self) -> Pointer - where Tag: Default - { - Pointer::new_with_tag(self.alloc_id, self.offset, Default::default()) - } -} - -impl<'tcx, Tag> Pointer { - #[inline(always)] - pub fn new_with_tag(alloc_id: AllocId, offset: Size, tag: Tag) -> Self { - Pointer { alloc_id, offset, tag } - } - - pub fn wrapping_signed_offset(self, i: i64, cx: C) -> Self { - Pointer::new_with_tag( - self.alloc_id, - Size::from_bytes(cx.data_layout().wrapping_signed_offset(self.offset.bytes(), i)), - self.tag, - ) - } - - pub fn overflowing_signed_offset(self, i: i128, cx: C) -> (Self, bool) { - let (res, over) = cx.data_layout().overflowing_signed_offset(self.offset.bytes(), i); - (Pointer::new_with_tag(self.alloc_id, Size::from_bytes(res), self.tag), over) - } - - pub fn signed_offset(self, i: i64, cx: C) -> EvalResult<'tcx, Self> { - Ok(Pointer::new_with_tag( - self.alloc_id, - Size::from_bytes(cx.data_layout().signed_offset(self.offset.bytes(), i)?), - self.tag, - )) - } - - pub fn overflowing_offset(self, i: Size, cx: C) -> (Self, bool) { - let (res, over) = cx.data_layout().overflowing_offset(self.offset.bytes(), i.bytes()); - (Pointer::new_with_tag(self.alloc_id, Size::from_bytes(res), self.tag), over) - } - - pub fn offset(self, i: Size, cx: C) -> EvalResult<'tcx, Self> { - Ok(Pointer::new_with_tag( - self.alloc_id, - Size::from_bytes(cx.data_layout().offset(self.offset.bytes(), i.bytes())?), - self.tag - )) - } - - #[inline] - pub fn erase_tag(self) -> Pointer { - Pointer { alloc_id: self.alloc_id, offset: self.offset, tag: () } - } -} - - #[derive(Copy, Clone, Eq, Hash, Ord, PartialEq, PartialOrd, Debug)] pub struct AllocId(pub u64); @@ -295,12 +161,10 @@ impl AllocDecodingState 
{ } pub fn new(data_offsets: Vec) -> AllocDecodingState { - let decoding_state: Vec<_> = ::std::iter::repeat(Mutex::new(State::Empty)) - .take(data_offsets.len()) - .collect(); + let decoding_state = vec![Mutex::new(State::Empty); data_offsets.len()]; AllocDecodingState { - decoding_state: decoding_state, + decoding_state, data_offsets, } } @@ -523,91 +387,6 @@ impl<'tcx, M: fmt::Debug + Eq + Hash + Clone> AllocMap<'tcx, M> { } } -#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)] -pub struct Allocation { - /// The actual bytes of the allocation. - /// Note that the bytes of a pointer represent the offset of the pointer - pub bytes: Vec, - /// Maps from byte addresses to extra data for each pointer. - /// Only the first byte of a pointer is inserted into the map; i.e., - /// every entry in this map applies to `pointer_size` consecutive bytes starting - /// at the given offset. - pub relocations: Relocations, - /// Denotes undefined memory. Reading from undefined memory is forbidden in miri - pub undef_mask: UndefMask, - /// The alignment of the allocation to detect unaligned reads. - pub align: Align, - /// Whether the allocation is mutable. - /// Also used by codegen to determine if a static should be put into mutable memory, - /// which happens for `static mut` and `static` with interior mutability. - pub mutability: Mutability, - /// Extra state for the machine. - pub extra: Extra, -} - -impl Allocation { - /// Creates a read-only allocation initialized by the given bytes - pub fn from_bytes(slice: &[u8], align: Align) -> Self { - let mut undef_mask = UndefMask::new(Size::ZERO); - undef_mask.grow(Size::from_bytes(slice.len() as u64), true); - Self { - bytes: slice.to_owned(), - relocations: Relocations::new(), - undef_mask, - align, - mutability: Mutability::Immutable, - extra: Extra::default(), - } - } - - pub fn from_byte_aligned_bytes(slice: &[u8]) -> Self { - Allocation::from_bytes(slice, Align::from_bytes(1, 1).unwrap()) - } - - pub fn undef(size: Size, align: Align) -> Self { - assert_eq!(size.bytes() as usize as u64, size.bytes()); - Allocation { - bytes: vec![0; size.bytes() as usize], - relocations: Relocations::new(), - undef_mask: UndefMask::new(size), - align, - mutability: Mutability::Mutable, - extra: Extra::default(), - } - } -} - -impl<'tcx> ::serialize::UseSpecializedDecodable for &'tcx Allocation {} - -#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)] -pub struct Relocations(SortedMap); - -impl Relocations { - pub fn new() -> Self { - Relocations(SortedMap::new()) - } - - // The caller must guarantee that the given relocations are already sorted - // by address and contain no duplicates. 
- pub fn from_presorted(r: Vec<(Size, (Tag, Id))>) -> Self { - Relocations(SortedMap::from_presorted_elements(r)) - } -} - -impl Deref for Relocations { - type Target = SortedMap; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl DerefMut for Relocations { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.0 - } -} - //////////////////////////////////////////////////////////////////////////////// // Methods to access integers in the target endianness //////////////////////////////////////////////////////////////////////////////// @@ -650,103 +429,3 @@ pub fn truncate(value: u128, size: Size) -> u128 { // truncate (shift left to drop out leftover values, shift right to fill with zeroes) (value << shift) >> shift } - -//////////////////////////////////////////////////////////////////////////////// -// Undefined byte tracking -//////////////////////////////////////////////////////////////////////////////// - -type Block = u64; -const BLOCK_SIZE: u64 = 64; - -#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)] -pub struct UndefMask { - blocks: Vec, - len: Size, -} - -impl_stable_hash_for!(struct mir::interpret::UndefMask{blocks, len}); - -impl UndefMask { - pub fn new(size: Size) -> Self { - let mut m = UndefMask { - blocks: vec![], - len: Size::ZERO, - }; - m.grow(size, false); - m - } - - /// Check whether the range `start..end` (end-exclusive) is entirely defined. - /// - /// Returns `Ok(())` if it's defined. Otherwise returns the index of the byte - /// at which the first undefined access begins. - #[inline] - pub fn is_range_defined(&self, start: Size, end: Size) -> Result<(), Size> { - if end > self.len { - return Err(self.len); - } - - let idx = (start.bytes()..end.bytes()) - .map(|i| Size::from_bytes(i)) - .find(|&i| !self.get(i)); - - match idx { - Some(idx) => Err(idx), - None => Ok(()) - } - } - - pub fn set_range(&mut self, start: Size, end: Size, new_state: bool) { - let len = self.len; - if end > len { - self.grow(end - len, new_state); - } - self.set_range_inbounds(start, end, new_state); - } - - pub fn set_range_inbounds(&mut self, start: Size, end: Size, new_state: bool) { - for i in start.bytes()..end.bytes() { - self.set(Size::from_bytes(i), new_state); - } - } - - #[inline] - pub fn get(&self, i: Size) -> bool { - let (block, bit) = bit_index(i); - (self.blocks[block] & 1 << bit) != 0 - } - - #[inline] - pub fn set(&mut self, i: Size, new_state: bool) { - let (block, bit) = bit_index(i); - if new_state { - self.blocks[block] |= 1 << bit; - } else { - self.blocks[block] &= !(1 << bit); - } - } - - pub fn grow(&mut self, amount: Size, new_state: bool) { - let unused_trailing_bits = self.blocks.len() as u64 * BLOCK_SIZE - self.len.bytes(); - if amount.bytes() > unused_trailing_bits { - let additional_blocks = amount.bytes() / BLOCK_SIZE + 1; - assert_eq!(additional_blocks as usize as u64, additional_blocks); - self.blocks.extend( - iter::repeat(0).take(additional_blocks as usize), - ); - } - let start = self.len; - self.len += amount; - self.set_range_inbounds(start, start + amount, new_state); - } -} - -#[inline] -fn bit_index(bits: Size) -> (usize, usize) { - let bits = bits.bytes(); - let a = bits / BLOCK_SIZE; - let b = bits % BLOCK_SIZE; - assert_eq!(a as usize as u64, a); - assert_eq!(b as usize as u64, b); - (a as usize, b as usize) -} diff --git a/src/librustc/mir/interpret/pointer.rs b/src/librustc/mir/interpret/pointer.rs new file mode 100644 index 0000000000..a046825f08 --- /dev/null +++ 
b/src/librustc/mir/interpret/pointer.rs
@@ -0,0 +1,168 @@
+use mir;
+use ty::layout::{self, HasDataLayout, Size};
+
+use super::{
+ AllocId, EvalResult, InboundsCheck,
+};
+
+////////////////////////////////////////////////////////////////////////////////
+// Pointer arithmetic
+////////////////////////////////////////////////////////////////////////////////
+
+pub trait PointerArithmetic: layout::HasDataLayout {
+ // These are not supposed to be overridden.
+
+ #[inline(always)]
+ fn pointer_size(&self) -> Size {
+ self.data_layout().pointer_size
+ }
+
+ //// Truncate the given value to the pointer size; also return whether there was an overflow
+ #[inline]
+ fn truncate_to_ptr(&self, val: u128) -> (u64, bool) {
+ let max_ptr_plus_1 = 1u128 << self.pointer_size().bits();
+ ((val % max_ptr_plus_1) as u64, val >= max_ptr_plus_1)
+ }
+
+ #[inline]
+ fn offset<'tcx>(&self, val: u64, i: u64) -> EvalResult<'tcx, u64> {
+ let (res, over) = self.overflowing_offset(val, i);
+ if over { err!(Overflow(mir::BinOp::Add)) } else { Ok(res) }
+ }
+
+ #[inline]
+ fn overflowing_offset(&self, val: u64, i: u64) -> (u64, bool) {
+ let (res, over1) = val.overflowing_add(i);
+ let (res, over2) = self.truncate_to_ptr(u128::from(res));
+ (res, over1 || over2)
+ }
+
+ #[inline]
+ fn signed_offset<'tcx>(&self, val: u64, i: i64) -> EvalResult<'tcx, u64> {
+ let (res, over) = self.overflowing_signed_offset(val, i128::from(i));
+ if over { err!(Overflow(mir::BinOp::Add)) } else { Ok(res) }
+ }
+
+ // Overflow checking only works properly on the range from -u64 to +u64.
+ #[inline]
+ fn overflowing_signed_offset(&self, val: u64, i: i128) -> (u64, bool) {
+ // FIXME: is it possible to over/underflow here?
+ if i < 0 {
+ // trickery to ensure that i64::min_value() works fine
+ // this formula only works for true negative values; it panics for zero!
+ let n = u64::max_value() - (i as u64) + 1;
+ val.overflowing_sub(n)
+ } else {
+ self.overflowing_offset(val, i as u64)
+ }
+ }
+}
+
+impl PointerArithmetic for T {}
+
+
+/// Pointer is generic over the type that represents a reference to Allocations,
+/// thus making it possible for the most convenient representation to be used in
+/// each context.
+///
+/// Defaults to the index-based and loosely coupled AllocId.
+///
+/// Pointer is also generic over the `Tag` associated with each pointer,
+/// which is used to do provenance tracking during execution. 
+#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, RustcEncodable, RustcDecodable, Hash)] +pub struct Pointer { + pub alloc_id: Id, + pub offset: Size, + pub tag: Tag, +} + +/// Produces a `Pointer` which points to the beginning of the Allocation +impl From for Pointer { + #[inline(always)] + fn from(alloc_id: AllocId) -> Self { + Pointer::new(alloc_id, Size::ZERO) + } +} + +impl<'tcx> Pointer<()> { + #[inline(always)] + pub fn new(alloc_id: AllocId, offset: Size) -> Self { + Pointer { alloc_id, offset, tag: () } + } + + #[inline(always)] + pub fn with_default_tag(self) -> Pointer + where Tag: Default + { + Pointer::new_with_tag(self.alloc_id, self.offset, Default::default()) + } +} + +impl<'tcx, Tag> Pointer { + #[inline(always)] + pub fn new_with_tag(alloc_id: AllocId, offset: Size, tag: Tag) -> Self { + Pointer { alloc_id, offset, tag } + } + + #[inline] + pub fn offset(self, i: Size, cx: &impl HasDataLayout) -> EvalResult<'tcx, Self> { + Ok(Pointer::new_with_tag( + self.alloc_id, + Size::from_bytes(cx.data_layout().offset(self.offset.bytes(), i.bytes())?), + self.tag + )) + } + + #[inline] + pub fn overflowing_offset(self, i: Size, cx: &impl HasDataLayout) -> (Self, bool) { + let (res, over) = cx.data_layout().overflowing_offset(self.offset.bytes(), i.bytes()); + (Pointer::new_with_tag(self.alloc_id, Size::from_bytes(res), self.tag), over) + } + + #[inline(always)] + pub fn wrapping_offset(self, i: Size, cx: &impl HasDataLayout) -> Self { + self.overflowing_offset(i, cx).0 + } + + #[inline] + pub fn signed_offset(self, i: i64, cx: &impl HasDataLayout) -> EvalResult<'tcx, Self> { + Ok(Pointer::new_with_tag( + self.alloc_id, + Size::from_bytes(cx.data_layout().signed_offset(self.offset.bytes(), i)?), + self.tag, + )) + } + + #[inline] + pub fn overflowing_signed_offset(self, i: i128, cx: &impl HasDataLayout) -> (Self, bool) { + let (res, over) = cx.data_layout().overflowing_signed_offset(self.offset.bytes(), i); + (Pointer::new_with_tag(self.alloc_id, Size::from_bytes(res), self.tag), over) + } + + #[inline(always)] + pub fn wrapping_signed_offset(self, i: i64, cx: &impl HasDataLayout) -> Self { + self.overflowing_signed_offset(i128::from(i), cx).0 + } + + #[inline(always)] + pub fn erase_tag(self) -> Pointer { + Pointer { alloc_id: self.alloc_id, offset: self.offset, tag: () } + } + + #[inline(always)] + pub fn check_in_alloc( + self, + allocation_size: Size, + check: InboundsCheck, + ) -> EvalResult<'tcx, ()> { + if self.offset > allocation_size { + err!(PointerOutOfBounds { + ptr: self.erase_tag(), + check, + allocation_size, + }) + } else { + Ok(()) + } + } +} diff --git a/src/librustc/mir/interpret/value.rs b/src/librustc/mir/interpret/value.rs index 4304f08a78..4bcba9d546 100644 --- a/src/librustc/mir/interpret/value.rs +++ b/src/librustc/mir/interpret/value.rs @@ -8,14 +8,22 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![allow(unknown_lints)] +use std::fmt; -use ty::layout::{HasDataLayout, Size}; -use ty::subst::Substs; -use hir::def_id::DefId; +use crate::ty::{Ty, subst::Substs, layout::{HasDataLayout, Size}}; +use crate::hir::def_id::DefId; use super::{EvalResult, Pointer, PointerArithmetic, Allocation, AllocId, sign_extend, truncate}; +/// Represents the result of a raw const operation, pre-validation. 
+#[derive(Copy, Clone, Debug, Eq, PartialEq, RustcEncodable, RustcDecodable, Hash)] +pub struct RawConst<'tcx> { + // the value lives here, at offset 0, and that allocation definitely is a `AllocType::Memory` + // (so you can use `AllocMap::unwrap_memory`). + pub alloc_id: AllocId, + pub ty: Ty<'tcx>, +} + /// Represents a constant value in Rust. Scalar and ScalarPair are optimizations which /// matches the LocalValue optimizations for easy conversions between Value and ConstValue. #[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, RustcEncodable, RustcDecodable, Hash)] @@ -23,6 +31,7 @@ pub enum ConstValue<'tcx> { /// Never returned from the `const_eval` query, but the HIR contains these frequently in order /// to allow HIR creation to happen for everything before needing to be able to run constant /// evaluation + /// FIXME: The query should then return a type that does not even have this variant. Unevaluated(DefId, &'tcx Substs<'tcx>), /// Used only for types with layout::abi::Scalar ABI and ZSTs @@ -65,7 +74,7 @@ impl<'tcx> ConstValue<'tcx> { pub fn new_slice( val: Scalar, len: u64, - cx: impl HasDataLayout + cx: &impl HasDataLayout ) -> Self { ConstValue::ScalarPair(val, Scalar::Bits { bits: len as u128, @@ -99,6 +108,15 @@ pub enum Scalar { Ptr(Pointer), } +impl fmt::Display for Scalar { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Scalar::Ptr(_) => write!(f, "a pointer"), + Scalar::Bits { bits, .. } => write!(f, "{}", bits), + } + } +} + impl<'tcx> Scalar<()> { #[inline] pub fn with_default_tag(self) -> Scalar @@ -121,7 +139,7 @@ impl<'tcx, Tag> Scalar { } #[inline] - pub fn ptr_null(cx: impl HasDataLayout) -> Self { + pub fn ptr_null(cx: &impl HasDataLayout) -> Self { Scalar::Bits { bits: 0, size: cx.data_layout().pointer_size.bytes() as u8, @@ -134,52 +152,80 @@ impl<'tcx, Tag> Scalar { } #[inline] - pub fn ptr_signed_offset(self, i: i64, cx: impl HasDataLayout) -> EvalResult<'tcx, Self> { - let layout = cx.data_layout(); + pub fn ptr_offset(self, i: Size, cx: &impl HasDataLayout) -> EvalResult<'tcx, Self> { + let dl = cx.data_layout(); match self { Scalar::Bits { bits, size } => { - assert_eq!(size as u64, layout.pointer_size.bytes()); + assert_eq!(size as u64, dl.pointer_size.bytes()); Ok(Scalar::Bits { - bits: layout.signed_offset(bits as u64, i)? as u128, + bits: dl.offset(bits as u64, i.bytes())? as u128, size, }) } - Scalar::Ptr(ptr) => ptr.signed_offset(i, layout).map(Scalar::Ptr), + Scalar::Ptr(ptr) => ptr.offset(i, dl).map(Scalar::Ptr), } } #[inline] - pub fn ptr_offset(self, i: Size, cx: impl HasDataLayout) -> EvalResult<'tcx, Self> { - let layout = cx.data_layout(); + pub fn ptr_wrapping_offset(self, i: Size, cx: &impl HasDataLayout) -> Self { + let dl = cx.data_layout(); match self { Scalar::Bits { bits, size } => { - assert_eq!(size as u64, layout.pointer_size.bytes()); - Ok(Scalar::Bits { - bits: layout.offset(bits as u64, i.bytes())? 
as u128, - size, - }) - } - Scalar::Ptr(ptr) => ptr.offset(i, layout).map(Scalar::Ptr), - } - } - - #[inline] - pub fn ptr_wrapping_signed_offset(self, i: i64, cx: impl HasDataLayout) -> Self { - let layout = cx.data_layout(); - match self { - Scalar::Bits { bits, size } => { - assert_eq!(size as u64, layout.pointer_size.bytes()); + assert_eq!(size as u64, dl.pointer_size.bytes()); Scalar::Bits { - bits: layout.wrapping_signed_offset(bits as u64, i) as u128, + bits: dl.overflowing_offset(bits as u64, i.bytes()).0 as u128, size, } } - Scalar::Ptr(ptr) => Scalar::Ptr(ptr.wrapping_signed_offset(i, layout)), + Scalar::Ptr(ptr) => Scalar::Ptr(ptr.wrapping_offset(i, dl)), } } #[inline] - pub fn is_null_ptr(self, cx: impl HasDataLayout) -> bool { + pub fn ptr_signed_offset(self, i: i64, cx: &impl HasDataLayout) -> EvalResult<'tcx, Self> { + let dl = cx.data_layout(); + match self { + Scalar::Bits { bits, size } => { + assert_eq!(size as u64, dl.pointer_size().bytes()); + Ok(Scalar::Bits { + bits: dl.signed_offset(bits as u64, i)? as u128, + size, + }) + } + Scalar::Ptr(ptr) => ptr.signed_offset(i, dl).map(Scalar::Ptr), + } + } + + #[inline] + pub fn ptr_wrapping_signed_offset(self, i: i64, cx: &impl HasDataLayout) -> Self { + let dl = cx.data_layout(); + match self { + Scalar::Bits { bits, size } => { + assert_eq!(size as u64, dl.pointer_size.bytes()); + Scalar::Bits { + bits: dl.overflowing_signed_offset(bits as u64, i128::from(i)).0 as u128, + size, + } + } + Scalar::Ptr(ptr) => Scalar::Ptr(ptr.wrapping_signed_offset(i, dl)), + } + } + + /// Returns this pointers offset from the allocation base, or from NULL (for + /// integer pointers). + #[inline] + pub fn get_ptr_offset(self, cx: &impl HasDataLayout) -> Size { + match self { + Scalar::Bits { bits, size } => { + assert_eq!(size as u64, cx.pointer_size().bytes()); + Size::from_bytes(bits as u64) + } + Scalar::Ptr(ptr) => ptr.offset, + } + } + + #[inline] + pub fn is_null_ptr(self, cx: &impl HasDataLayout) -> bool { match self { Scalar::Bits { bits, size } => { assert_eq!(size as u64, cx.data_layout().pointer_size.bytes()); @@ -301,7 +347,7 @@ impl<'tcx, Tag> Scalar { Ok(b as u64) } - pub fn to_usize(self, cx: impl HasDataLayout) -> EvalResult<'static, u64> { + pub fn to_usize(self, cx: &impl HasDataLayout) -> EvalResult<'static, u64> { let b = self.to_bits(cx.data_layout().pointer_size)?; assert_eq!(b as u64 as u128, b); Ok(b as u64) @@ -331,7 +377,7 @@ impl<'tcx, Tag> Scalar { Ok(b as i64) } - pub fn to_isize(self, cx: impl HasDataLayout) -> EvalResult<'static, i64> { + pub fn to_isize(self, cx: &impl HasDataLayout) -> EvalResult<'static, i64> { let b = self.to_bits(cx.data_layout().pointer_size)?; let b = sign_extend(b, cx.data_layout().pointer_size) as i128; assert_eq!(b as i64 as i128, b); @@ -355,3 +401,131 @@ impl From> for Scalar { Scalar::Ptr(ptr) } } + +#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, RustcEncodable, RustcDecodable, Hash)] +pub enum ScalarMaybeUndef { + Scalar(Scalar), + Undef, +} + +impl From> for ScalarMaybeUndef { + #[inline(always)] + fn from(s: Scalar) -> Self { + ScalarMaybeUndef::Scalar(s) + } +} + +impl fmt::Display for ScalarMaybeUndef { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + ScalarMaybeUndef::Undef => write!(f, "uninitialized bytes"), + ScalarMaybeUndef::Scalar(s) => write!(f, "{}", s), + } + } +} + +impl<'tcx> ScalarMaybeUndef<()> { + #[inline] + pub fn with_default_tag(self) -> ScalarMaybeUndef + where Tag: Default + { + match self { + 
ScalarMaybeUndef::Scalar(s) => ScalarMaybeUndef::Scalar(s.with_default_tag()), + ScalarMaybeUndef::Undef => ScalarMaybeUndef::Undef, + } + } +} + +impl<'tcx, Tag> ScalarMaybeUndef { + #[inline] + pub fn erase_tag(self) -> ScalarMaybeUndef + { + match self { + ScalarMaybeUndef::Scalar(s) => ScalarMaybeUndef::Scalar(s.erase_tag()), + ScalarMaybeUndef::Undef => ScalarMaybeUndef::Undef, + } + } + + #[inline] + pub fn not_undef(self) -> EvalResult<'static, Scalar> { + match self { + ScalarMaybeUndef::Scalar(scalar) => Ok(scalar), + ScalarMaybeUndef::Undef => err!(ReadUndefBytes(Size::from_bytes(0))), + } + } + + #[inline(always)] + pub fn to_ptr(self) -> EvalResult<'tcx, Pointer> { + self.not_undef()?.to_ptr() + } + + #[inline(always)] + pub fn to_bits(self, target_size: Size) -> EvalResult<'tcx, u128> { + self.not_undef()?.to_bits(target_size) + } + + #[inline(always)] + pub fn to_bool(self) -> EvalResult<'tcx, bool> { + self.not_undef()?.to_bool() + } + + #[inline(always)] + pub fn to_char(self) -> EvalResult<'tcx, char> { + self.not_undef()?.to_char() + } + + #[inline(always)] + pub fn to_f32(self) -> EvalResult<'tcx, f32> { + self.not_undef()?.to_f32() + } + + #[inline(always)] + pub fn to_f64(self) -> EvalResult<'tcx, f64> { + self.not_undef()?.to_f64() + } + + #[inline(always)] + pub fn to_u8(self) -> EvalResult<'tcx, u8> { + self.not_undef()?.to_u8() + } + + #[inline(always)] + pub fn to_u32(self) -> EvalResult<'tcx, u32> { + self.not_undef()?.to_u32() + } + + #[inline(always)] + pub fn to_u64(self) -> EvalResult<'tcx, u64> { + self.not_undef()?.to_u64() + } + + #[inline(always)] + pub fn to_usize(self, cx: &impl HasDataLayout) -> EvalResult<'tcx, u64> { + self.not_undef()?.to_usize(cx) + } + + #[inline(always)] + pub fn to_i8(self) -> EvalResult<'tcx, i8> { + self.not_undef()?.to_i8() + } + + #[inline(always)] + pub fn to_i32(self) -> EvalResult<'tcx, i32> { + self.not_undef()?.to_i32() + } + + #[inline(always)] + pub fn to_i64(self) -> EvalResult<'tcx, i64> { + self.not_undef()?.to_i64() + } + + #[inline(always)] + pub fn to_isize(self, cx: &impl HasDataLayout) -> EvalResult<'tcx, i64> { + self.not_undef()?.to_isize(cx) + } +} + +impl_stable_hash_for!(enum ::mir::interpret::ScalarMaybeUndef { + Scalar(v), + Undef +}); diff --git a/src/librustc/mir/mod.rs b/src/librustc/mir/mod.rs index 952783a91b..368f83eb61 100644 --- a/src/librustc/mir/mod.rs +++ b/src/librustc/mir/mod.rs @@ -10,16 +10,16 @@ //! MIR datatypes and passes. See the [rustc guide] for more info. //! -//! [rustc guide]: https://rust-lang-nursery.github.io/rustc-guide/mir/index.html +//! 
[rustc guide]: https://rust-lang.github.io/rustc-guide/mir/index.html use hir::def::CtorKind; use hir::def_id::DefId; use hir::{self, HirId, InlineAsm}; -use middle::region; use mir::interpret::{ConstValue, EvalErrorKind, Scalar}; use mir::visit::MirVisitable; use rustc_apfloat::ieee::{Double, Single}; use rustc_apfloat::Float; +use rustc_data_structures::fx::FxHashSet; use rustc_data_structures::graph::dominators::{dominators, Dominators}; use rustc_data_structures::graph::{self, GraphPredecessors, GraphSuccessors}; use rustc_data_structures::indexed_vec::{Idx, IndexVec}; @@ -39,6 +39,7 @@ use syntax_pos::{Span, DUMMY_SP}; use ty::fold::{TypeFoldable, TypeFolder, TypeVisitor}; use ty::subst::{CanonicalUserSubsts, Subst, Substs}; use ty::{self, AdtDef, CanonicalTy, ClosureSubsts, GeneratorSubsts, Region, Ty, TyCtxt}; +use ty::layout::VariantIdx; use util::ppaux; pub use mir::interpret::AssertMessage; @@ -69,6 +70,24 @@ impl<'tcx> HasLocalDecls<'tcx> for Mir<'tcx> { } } +/// The various "big phases" that MIR goes through. +/// +/// Warning: ordering of variants is significant +#[derive(Copy, Clone, RustcEncodable, RustcDecodable, Debug, PartialEq, Eq, PartialOrd, Ord)] +pub enum MirPhase { + Build = 0, + Const = 1, + Validated = 2, + Optimized = 3, +} + +impl MirPhase { + /// Gets the index of the current MirPhase within the set of all MirPhases. + pub fn phase_index(&self) -> usize { + *self as usize + } +} + /// Lowered representation of a single function. #[derive(Clone, RustcEncodable, RustcDecodable, Debug)] pub struct Mir<'tcx> { @@ -76,6 +95,13 @@ pub struct Mir<'tcx> { /// that indexes into this vector. basic_blocks: IndexVec>, + /// Records how far through the "desugaring and optimization" process this particular + /// MIR has traversed. This is particularly useful when inlining, since in that context + /// we instantiate the promoted constants and add them to our promoted vector -- but those + /// promoted items have already been optimized, whereas ours have not. This field allows + /// us to see the difference and forego optimization on the inlined promoted items. + pub phase: MirPhase, + /// List of source scopes; these are referenced by statements /// and used for debuginfo. Indexed by a `SourceScope`. pub source_scopes: IndexVec, @@ -151,6 +177,7 @@ impl<'tcx> Mir<'tcx> { ); Mir { + phase: MirPhase::Build, basic_blocks, source_scopes, source_scope_local_data, @@ -278,6 +305,20 @@ impl<'tcx> Mir<'tcx> { }) } + /// Returns an iterator over all user-declared mutable locals. + #[inline] + pub fn mut_vars_iter<'a>(&'a self) -> impl Iterator + 'a { + (self.arg_count + 1..self.local_decls.len()).filter_map(move |index| { + let local = Local::new(index); + let decl = &self.local_decls[local]; + if decl.is_user_variable.is_some() && decl.mutability == Mutability::Mut { + Some(local) + } else { + None + } + }) + } + /// Returns an iterator over all user-declared mutable arguments and locals. #[inline] pub fn mut_vars_and_args_iter<'a>(&'a self) -> impl Iterator + 'a { @@ -368,6 +409,7 @@ pub enum Safety { } impl_stable_hash_for!(struct Mir<'tcx> { + phase, basic_blocks, source_scopes, source_scope_local_data, @@ -479,25 +521,25 @@ pub enum BorrowKind { /// implicit closure bindings. 
It is needed when the closure is /// borrowing or mutating a mutable referent, e.g.: /// - /// let x: &mut isize = ...; - /// let y = || *x += 5; + /// let x: &mut isize = ...; + /// let y = || *x += 5; /// /// If we were to try to translate this closure into a more explicit /// form, we'd encounter an error with the code as written: /// - /// struct Env { x: & &mut isize } - /// let x: &mut isize = ...; - /// let y = (&mut Env { &x }, fn_ptr); // Closure is pair of env and fn - /// fn fn_ptr(env: &mut Env) { **env.x += 5; } + /// struct Env { x: & &mut isize } + /// let x: &mut isize = ...; + /// let y = (&mut Env { &x }, fn_ptr); // Closure is pair of env and fn + /// fn fn_ptr(env: &mut Env) { **env.x += 5; } /// /// This is then illegal because you cannot mutate an `&mut` found /// in an aliasable location. To solve, you'd have to translate with /// an `&mut` borrow: /// - /// struct Env { x: & &mut isize } - /// let x: &mut isize = ...; - /// let y = (&mut Env { &mut x }, fn_ptr); // changed from &x to &mut x - /// fn fn_ptr(env: &mut Env) { **env.x += 5; } + /// struct Env { x: & &mut isize } + /// let x: &mut isize = ...; + /// let y = (&mut Env { &mut x }, fn_ptr); // changed from &x to &mut x + /// fn fn_ptr(env: &mut Env) { **env.x += 5; } /// /// Now the assignment to `**env.x` is legal, but creating a /// mutable pointer to `x` is not because `x` is not mutable. We @@ -616,6 +658,13 @@ impl_stable_hash_for!(enum self::ImplicitSelfKind { None }); +impl_stable_hash_for!(enum self::MirPhase { + Build, + Const, + Validated, + Optimized, +}); + mod binding_form_impl { use ich::StableHashingContext; use rustc_data_structures::stable_hasher::{HashStable, StableHasher, StableHasherResult}; @@ -1670,14 +1719,14 @@ pub struct Statement<'tcx> { pub kind: StatementKind<'tcx>, } +// `Statement` is used a lot. Make sure it doesn't unintentionally get bigger. +#[cfg(target_arch = "x86_64")] +static_assert!(MEM_SIZE_OF_STATEMENT: mem::size_of::>() == 56); + impl<'tcx> Statement<'tcx> { /// Changes a statement to a nop. This is both faster than deleting instructions and avoids /// invalidating statement indices in `Location`s. pub fn make_nop(&mut self) { - // `Statement` contributes significantly to peak memory usage. Make - // sure it doesn't get bigger. - static_assert!(STATEMENT_IS_AT_MOST_56_BYTES: mem::size_of::>() <= 56); - self.kind = StatementKind::Nop } @@ -1704,7 +1753,7 @@ pub enum StatementKind<'tcx> { /// Write the discriminant for a variant to the enum Place. SetDiscriminant { place: Place<'tcx>, - variant_index: usize, + variant_index: VariantIdx, }, /// Start a live range for the storage of the local. @@ -1717,17 +1766,27 @@ pub enum StatementKind<'tcx> { InlineAsm { asm: Box, outputs: Box<[Place<'tcx>]>, - inputs: Box<[Operand<'tcx>]>, + inputs: Box<[(Span, Operand<'tcx>)]>, }, - /// Assert the given places to be valid inhabitants of their type. These statements are - /// currently only interpreted by miri and only generated when "-Z mir-emit-validate" is passed. - /// See for more details. - Validate(ValidationOp, Vec>>), + /// Retag references in the given place, ensuring they got fresh tags. This is + /// part of the Stacked Borrows model. These statements are currently only interpreted + /// by miri and only generated when "-Z mir-emit-retag" is passed. + /// See + /// for more details. + Retag { + /// `fn_entry` indicates whether this is the initial retag that happens in the + /// function prolog. 
+ fn_entry: bool, + place: Place<'tcx>, + }, - /// Mark one terminating point of a region scope (i.e. static region). - /// (The starting point(s) arise implicitly from borrows.) - EndRegion(region::Scope), + /// Escape the given reference to a raw pointer, so that it can be accessed + /// without precise provenance tracking. These statements are currently only interpreted + /// by miri and only generated when "-Z mir-emit-retag" is passed. + /// See + /// for more details. + EscapeToRaw(Operand<'tcx>), /// Encodes a user's type ascription. These need to be preserved /// intact so that NLL can respect them. For example: @@ -1776,66 +1835,15 @@ pub enum FakeReadCause { ForLet, } -/// The `ValidationOp` describes what happens with each of the operands of a -/// `Validate` statement. -#[derive(Copy, Clone, RustcEncodable, RustcDecodable, PartialEq, Eq)] -pub enum ValidationOp { - /// Recursively traverse the place following the type and validate that all type - /// invariants are maintained. Furthermore, acquire exclusive/read-only access to the - /// memory reachable from the place. - Acquire, - /// Recursive traverse the *mutable* part of the type and relinquish all exclusive - /// access. - Release, - /// Recursive traverse the *mutable* part of the type and relinquish all exclusive - /// access *until* the given region ends. Then, access will be recovered. - Suspend(region::Scope), -} - -impl Debug for ValidationOp { - fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result { - use self::ValidationOp::*; - match *self { - Acquire => write!(fmt, "Acquire"), - Release => write!(fmt, "Release"), - // (reuse lifetime rendering policy from ppaux.) - Suspend(ref ce) => write!(fmt, "Suspend({})", ty::ReScope(*ce)), - } - } -} - -// This is generic so that it can be reused by miri -#[derive(Clone, Hash, PartialEq, Eq, RustcEncodable, RustcDecodable)] -pub struct ValidationOperand<'tcx, T> { - pub place: T, - pub ty: Ty<'tcx>, - pub re: Option, - pub mutbl: hir::Mutability, -} - -impl<'tcx, T: Debug> Debug for ValidationOperand<'tcx, T> { - fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result { - write!(fmt, "{:?}: {:?}", self.place, self.ty)?; - if let Some(ce) = self.re { - // (reuse lifetime rendering policy from ppaux.) - write!(fmt, "/{}", ty::ReScope(ce))?; - } - if let hir::MutImmutable = self.mutbl { - write!(fmt, " (imm)")?; - } - Ok(()) - } -} - impl<'tcx> Debug for Statement<'tcx> { fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result { use self::StatementKind::*; match self.kind { Assign(ref place, ref rv) => write!(fmt, "{:?} = {:?}", place, rv), FakeRead(ref cause, ref place) => write!(fmt, "FakeRead({:?}, {:?})", cause, place), - // (reuse lifetime rendering policy from ppaux.) - EndRegion(ref ce) => write!(fmt, "EndRegion({})", ty::ReScope(*ce)), - Validate(ref op, ref places) => write!(fmt, "Validate({:?}, {:?})", op, places), + Retag { fn_entry, ref place } => + write!(fmt, "Retag({}{:?})", if fn_entry { "[fn entry] " } else { "" }, place), + EscapeToRaw(ref place) => write!(fmt, "EscapeToRaw({:?})", place), StorageLive(ref place) => write!(fmt, "StorageLive({:?})", place), StorageDead(ref place) => write!(fmt, "StorageDead({:?})", place), SetDiscriminant { @@ -1933,7 +1941,7 @@ pub enum ProjectionElem<'tcx, V, T> { /// "Downcast" to a variant of an ADT. Currently, we only introduce /// this for ADTs with more than one variant. It may be better to /// just introduce it always, or always for enums. 
- Downcast(&'tcx AdtDef, usize), + Downcast(&'tcx AdtDef, VariantIdx), } /// Alias for projections as they appear in places, where the base is a place @@ -1944,6 +1952,11 @@ pub type PlaceProjection<'tcx> = Projection<'tcx, Place<'tcx>, Local, Ty<'tcx>>; /// and the index is a local. pub type PlaceElem<'tcx> = ProjectionElem<'tcx, Local, Ty<'tcx>>; +// at least on 64 bit systems, `PlaceElem` should not be larger than two pointers +static_assert!(PROJECTION_ELEM_IS_2_PTRS_LARGE: + mem::size_of::>() <= 16 +); + /// Alias for projections as they appear in `UserTypeProjection`, where we /// need neither the `V` parameter for `Index` nor the `T` for `Field`. pub type ProjectionKind<'tcx> = ProjectionElem<'tcx, (), ()>; @@ -1963,7 +1976,7 @@ impl<'tcx> Place<'tcx> { self.elem(ProjectionElem::Deref) } - pub fn downcast(self, adt_def: &'tcx AdtDef, variant_index: usize) -> Place<'tcx> { + pub fn downcast(self, adt_def: &'tcx AdtDef, variant_index: VariantIdx) -> Place<'tcx> { self.elem(ProjectionElem::Downcast(adt_def, variant_index)) } @@ -2187,7 +2200,7 @@ pub enum CastKind { /// "Unsize" -- convert a thin-or-fat pointer to a fat pointer. /// codegen must figure out the details once full monomorphization /// is known. For example, this could be used to cast from a - /// `&[i32;N]` to a `&[i32]`, or a `Box` to a `Box` + /// `&[i32;N]` to a `&[i32]`, or a `Box` to a `Box` /// (presuming `T: Trait`). Unsize, } @@ -2205,7 +2218,7 @@ pub enum AggregateKind<'tcx> { /// active field index would identity the field `c` Adt( &'tcx AdtDef, - usize, + VariantIdx, &'tcx Substs<'tcx>, Option>, Option, @@ -2605,7 +2618,7 @@ pub fn fmt_const_val(f: &mut impl Write, const_val: &ty::Const<'_>) -> fmt::Resu _ => {} } } - // print function definitons + // print function definitions if let FnDef(did, _) = ty.sty { return write!(f, "{}", item_path_str(did)); } @@ -2715,6 +2728,36 @@ impl Location { } } + /// Returns `true` if `other` is earlier in the control flow graph than `self`. + pub fn is_predecessor_of<'tcx>(&self, other: Location, mir: &Mir<'tcx>) -> bool { + // If we are in the same block as the other location and are an earlier statement + // then we are a predecessor of `other`. + if self.block == other.block && self.statement_index < other.statement_index { + return true; + } + + // If we're in another block, then we want to check that block is a predecessor of `other`. + let mut queue: Vec = mir.predecessors_for(other.block).clone(); + let mut visited = FxHashSet::default(); + + while let Some(block) = queue.pop() { + // If we haven't visited this block before, then make sure we visit it's predecessors. + if visited.insert(block) { + queue.append(&mut mir.predecessors_for(block).clone()); + } else { + continue; + } + + // If we found the block that `self` is in, then we are a predecessor of `other` (since + // we found that block by looking at the predecessors of `other`). + if self.block == block { + return true; + } + } + + false + } + pub fn dominates(&self, other: Location, dominators: &Dominators) -> bool { if self.block == other.block { self.statement_index <= other.statement_index @@ -2905,11 +2948,11 @@ pub enum ClosureOutlivesSubject<'tcx> { CloneTypeFoldableAndLiftImpls! { BlockTailInfo, + MirPhase, Mutability, SourceInfo, UpvarDecl, FakeReadCause, - ValidationOp, SourceScope, SourceScopeData, SourceScopeLocalData, @@ -2917,6 +2960,7 @@ CloneTypeFoldableAndLiftImpls! { BraceStructTypeFoldableImpl! 
{ impl<'tcx> TypeFoldable<'tcx> for Mir<'tcx> { + phase, basic_blocks, source_scopes, source_scope_local_data, @@ -2961,12 +3005,6 @@ BraceStructTypeFoldableImpl! { } } -BraceStructTypeFoldableImpl! { - impl<'tcx> TypeFoldable<'tcx> for ValidationOperand<'tcx, Place<'tcx>> { - place, ty, re, mutbl - } -} - BraceStructTypeFoldableImpl! { impl<'tcx> TypeFoldable<'tcx> for Statement<'tcx> { source_info, kind @@ -2981,8 +3019,8 @@ EnumTypeFoldableImpl! { (StatementKind::StorageLive)(a), (StatementKind::StorageDead)(a), (StatementKind::InlineAsm) { asm, outputs, inputs }, - (StatementKind::Validate)(a, b), - (StatementKind::EndRegion)(a), + (StatementKind::Retag) { fn_entry, place }, + (StatementKind::EscapeToRaw)(place), (StatementKind::AscribeUserType)(a, v, b), (StatementKind::Nop), } @@ -3000,7 +3038,7 @@ impl<'tcx> TypeFoldable<'tcx> for Terminator<'tcx> { use mir::TerminatorKind::*; let kind = match self.kind { - Goto { target } => Goto { target: target }, + Goto { target } => Goto { target }, SwitchInt { ref discr, switch_ty, diff --git a/src/librustc/mir/tcx.rs b/src/librustc/mir/tcx.rs index 6958719536..baa88dba45 100644 --- a/src/librustc/mir/tcx.rs +++ b/src/librustc/mir/tcx.rs @@ -16,6 +16,7 @@ use mir::*; use ty::subst::{Subst, Substs}; use ty::{self, AdtDef, Ty, TyCtxt}; +use ty::layout::VariantIdx; use hir; use ty::util::IntTypeExt; @@ -27,12 +28,16 @@ pub enum PlaceTy<'tcx> { /// Downcast to a particular variant of an enum. Downcast { adt_def: &'tcx AdtDef, substs: &'tcx Substs<'tcx>, - variant_index: usize }, + variant_index: VariantIdx }, } +static_assert!(PLACE_TY_IS_3_PTRS_LARGE: + mem::size_of::>() <= 24 +); + impl<'a, 'gcx, 'tcx> PlaceTy<'tcx> { pub fn from_ty(ty: Ty<'tcx>) -> PlaceTy<'tcx> { - PlaceTy::Ty { ty: ty } + PlaceTy::Ty { ty } } pub fn to_ty(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Ty<'tcx> { @@ -54,7 +59,7 @@ impl<'a, 'gcx, 'tcx> PlaceTy<'tcx> { pub fn field_ty(self, tcx: TyCtxt<'a, 'gcx, 'tcx>, f: &Field) -> Ty<'tcx> { // Pass `0` here so it can be used as a "default" variant_index in first arm below - let answer = match (self, 0) { + let answer = match (self, VariantIdx::new(0)) { (PlaceTy::Ty { ty: &ty::TyS { sty: ty::TyKind::Adt(adt_def, substs), .. } }, variant_index) | (PlaceTy::Downcast { adt_def, substs, variant_index }, _) => { @@ -134,7 +139,7 @@ impl<'a, 'gcx, 'tcx> PlaceTy<'tcx> { match self.to_ty(tcx).sty { ty::Adt(adt_def, substs) => { assert!(adt_def.is_enum()); - assert!(index < adt_def.variants.len()); + assert!(index.as_usize() < adt_def.variants.len()); assert_eq!(adt_def, adt_def1); PlaceTy::Downcast { adt_def, substs, diff --git a/src/librustc/mir/traversal.rs b/src/librustc/mir/traversal.rs index db1bc3e751..f3a0b7de90 100644 --- a/src/librustc/mir/traversal.rs +++ b/src/librustc/mir/traversal.rs @@ -34,6 +34,7 @@ pub struct Preorder<'a, 'tcx: 'a> { mir: &'a Mir<'tcx>, visited: BitSet, worklist: Vec, + root_is_start_block: bool, } impl<'a, 'tcx> Preorder<'a, 'tcx> { @@ -44,6 +45,7 @@ impl<'a, 'tcx> Preorder<'a, 'tcx> { mir, visited: BitSet::new_empty(mir.basic_blocks().len()), worklist, + root_is_start_block: root == START_BLOCK, } } } @@ -75,15 +77,19 @@ impl<'a, 'tcx> Iterator for Preorder<'a, 'tcx> { fn size_hint(&self) -> (usize, Option) { // All the blocks, minus the number of blocks we've visited. - let remaining = self.mir.basic_blocks().len() - self.visited.count(); + let upper = self.mir.basic_blocks().len() - self.visited.count(); - // We will visit all remaining blocks exactly once. 
- (remaining, Some(remaining)) + let lower = if self.root_is_start_block { + // We will visit all remaining blocks exactly once. + upper + } else { + self.worklist.len() + }; + + (lower, Some(upper)) } } -impl<'a, 'tcx> ExactSizeIterator for Preorder<'a, 'tcx> {} - /// Postorder traversal of a graph. /// /// Postorder traversal is when each node is visited after all of it's @@ -105,7 +111,8 @@ impl<'a, 'tcx> ExactSizeIterator for Preorder<'a, 'tcx> {} pub struct Postorder<'a, 'tcx: 'a> { mir: &'a Mir<'tcx>, visited: BitSet, - visit_stack: Vec<(BasicBlock, Successors<'a>)> + visit_stack: Vec<(BasicBlock, Successors<'a>)>, + root_is_start_block: bool, } impl<'a, 'tcx> Postorder<'a, 'tcx> { @@ -113,7 +120,8 @@ impl<'a, 'tcx> Postorder<'a, 'tcx> { let mut po = Postorder { mir, visited: BitSet::new_empty(mir.basic_blocks().len()), - visit_stack: Vec::new() + visit_stack: Vec::new(), + root_is_start_block: root == START_BLOCK, }; @@ -134,7 +142,7 @@ impl<'a, 'tcx> Postorder<'a, 'tcx> { // // It does the actual traversal of the graph, while the `next` method on the iterator // just pops off of the stack. `visit_stack` is a stack containing pairs of nodes and - // iterators over the sucessors of those nodes. Each iteration attempts to get the next + // iterators over the successors of those nodes. Each iteration attempts to get the next // node from the top of the stack, then pushes that node and an iterator over the // successors to the top of the stack. This loop only grows `visit_stack`, stopping when // we reach a child that has no children that we haven't already visited. @@ -155,7 +163,7 @@ impl<'a, 'tcx> Postorder<'a, 'tcx> { // The state of the stack starts out with just the root node (`A` in this case); // [(A, [B, C])] // - // When the first call to `traverse_sucessor` happens, the following happens: + // When the first call to `traverse_successor` happens, the following happens: // // [(B, [D]), // `B` taken from the successors of `A`, pushed to the // // top of the stack along with the successors of `B` @@ -214,15 +222,19 @@ impl<'a, 'tcx> Iterator for Postorder<'a, 'tcx> { fn size_hint(&self) -> (usize, Option) { // All the blocks, minus the number of blocks we've visited. - let remaining = self.mir.basic_blocks().len() - self.visited.count(); + let upper = self.mir.basic_blocks().len() - self.visited.count(); - // We will visit all remaining blocks exactly once. - (remaining, Some(remaining)) + let lower = if self.root_is_start_block { + // We will visit all remaining blocks exactly once. + upper + } else { + self.visit_stack.len() + }; + + (lower, Some(upper)) } } -impl<'a, 'tcx> ExactSizeIterator for Postorder<'a, 'tcx> {} - /// Reverse postorder traversal of a graph /// /// Reverse postorder is the reverse order of a postorder traversal. diff --git a/src/librustc/mir/visit.rs b/src/librustc/mir/visit.rs index caa627441c..0c9b06a8d8 100644 --- a/src/librustc/mir/visit.rs +++ b/src/librustc/mir/visit.rs @@ -152,6 +152,13 @@ macro_rules! make_mir_visitor { self.super_ascribe_user_ty(place, variance, user_ty, location); } + fn visit_retag(&mut self, + fn_entry: & $($mutability)* bool, + place: & $($mutability)* Place<'tcx>, + location: Location) { + self.super_retag(fn_entry, place, location); + } + fn visit_place(&mut self, place: & $($mutability)* Place<'tcx>, context: PlaceContext<'tcx>, @@ -370,18 +377,6 @@ macro_rules! 
make_mir_visitor { location ); } - StatementKind::EndRegion(_) => {} - StatementKind::Validate(_, ref $($mutability)* places) => { - for operand in places { - self.visit_place( - & $($mutability)* operand.place, - PlaceContext::NonUse(NonUseContext::Validate), - location - ); - self.visit_ty(& $($mutability)* operand.ty, - TyContext::Location(location)); - } - } StatementKind::SetDiscriminant{ ref $($mutability)* place, .. } => { self.visit_place( place, @@ -389,6 +384,9 @@ macro_rules! make_mir_visitor { location ); } + StatementKind::EscapeToRaw(ref $($mutability)* op) => { + self.visit_operand(op, location); + } StatementKind::StorageLive(ref $($mutability)* local) => { self.visit_local( local, @@ -413,10 +411,15 @@ macro_rules! make_mir_visitor { location ); } - for input in & $($mutability)* inputs[..] { + for (span, input) in & $($mutability)* inputs[..] { + self.visit_span(span); self.visit_operand(input, location); } } + StatementKind::Retag { ref $($mutability)* fn_entry, + ref $($mutability)* place } => { + self.visit_retag(fn_entry, place, location); + } StatementKind::AscribeUserType( ref $($mutability)* place, ref $($mutability)* variance, @@ -719,6 +722,17 @@ macro_rules! make_mir_visitor { self.visit_user_type_projection(user_ty); } + fn super_retag(&mut self, + _fn_entry: & $($mutability)* bool, + place: & $($mutability)* Place<'tcx>, + location: Location) { + self.visit_place( + place, + PlaceContext::MutatingUse(MutatingUseContext::Retag), + location, + ); + } + fn super_place(&mut self, place: & $($mutability)* Place<'tcx>, context: PlaceContext<'tcx>, @@ -1010,6 +1024,8 @@ pub enum MutatingUseContext<'tcx> { /// f(&mut x.y); /// Projection, + /// Retagging, a "Stacked Borrows" shadow state operation + Retag, } #[derive(Copy, Clone, Debug, PartialEq, Eq)] @@ -1020,8 +1036,6 @@ pub enum NonUseContext { StorageDead, /// User type annotation assertions for NLL. AscribeUserTy, - /// Validation command. - Validate, } #[derive(Copy, Clone, Debug, PartialEq, Eq)] diff --git a/src/librustc/session/code_stats.rs b/src/librustc/session/code_stats.rs index b1dcfdfcda..b8f5ce3cdb 100644 --- a/src/librustc/session/code_stats.rs +++ b/src/librustc/session/code_stats.rs @@ -71,7 +71,7 @@ impl CodeStats { let info = TypeSizeInfo { kind, type_description: type_desc.to_string(), - align: align.abi(), + align: align.bytes(), overall_size: overall_size.bytes(), packed: packed, opt_discr_size: opt_discr_size.map(|s| s.bytes()), diff --git a/src/librustc/session/config.rs b/src/librustc/session/config.rs index 569e7a24d2..480d4a8e48 100644 --- a/src/librustc/session/config.rs +++ b/src/librustc/session/config.rs @@ -224,7 +224,7 @@ impl Default for ErrorOutputType { // Use tree-based collections to cheaply get a deterministic Hash implementation. // DO NOT switch BTreeMap out for an unsorted container type! That would break -// dependency tracking for commandline arguments. +// dependency tracking for command-line arguments. #[derive(Clone, Hash)] pub struct OutputTypes(BTreeMap>); @@ -273,7 +273,7 @@ impl OutputTypes { // Use tree-based collections to cheaply get a deterministic Hash implementation. // DO NOT switch BTreeMap or BTreeSet out for an unsorted container type! That -// would break dependency tracking for commandline arguments. +// would break dependency tracking for command-line arguments. #[derive(Clone, Hash)] pub struct Externs(BTreeMap>>); @@ -339,7 +339,7 @@ macro_rules! 
top_level_options { ); } -// The top-level commandline options struct +// The top-level command-line options struct // // For each option, one has to specify how it behaves with regard to the // dependency tracking system of incremental compilation. This is done via the @@ -802,7 +802,7 @@ macro_rules! options { pub const parse_opt_uint: Option<&'static str> = Some("a number"); pub const parse_panic_strategy: Option<&'static str> = - Some("either `panic` or `abort`"); + Some("either `unwind` or `abort`"); pub const parse_relro_level: Option<&'static str> = Some("one of: `full`, `partial`, or `off`"); pub const parse_sanitizer: Option<&'static str> = @@ -1149,8 +1149,6 @@ options! {DebuggingOptions, DebuggingSetter, basic_debugging_options, "when debug-printing compiler state, do not include spans"), // o/w tests have closure@path identify_regions: bool = (false, parse_bool, [UNTRACKED], "make unnamed regions display as '# (where # is some non-ident unique id)"), - emit_end_regions: bool = (false, parse_bool, [UNTRACKED], - "emit EndRegion as part of MIR; enable transforms that solely process EndRegion"), borrowck: Option = (None, parse_opt_string, [UNTRACKED], "select which borrowck is used (`ast`, `mir`, `migrate`, or `compare`)"), two_phase_borrows: bool = (false, parse_bool, [UNTRACKED], @@ -1282,15 +1280,12 @@ options! {DebuggingOptions, DebuggingSetter, basic_debugging_options, "in addition to `.mir` files, create graphviz `.dot` files"), dump_mir_exclude_pass_number: bool = (false, parse_bool, [UNTRACKED], "if set, exclude the pass number when dumping MIR (used in tests)"), - mir_emit_validate: usize = (0, parse_uint, [TRACKED], - "emit Validate MIR statements, interpreted e.g. by miri (0: do not emit; 1: if function \ - contains unsafe block, only validate arguments; 2: always emit full validation)"), + mir_emit_retag: bool = (false, parse_bool, [TRACKED], + "emit Retagging MIR statements, interpreted e.g. by miri; implies -Zmir-opt-level=0"), perf_stats: bool = (false, parse_bool, [UNTRACKED], "print some performance-related statistics"), hir_stats: bool = (false, parse_bool, [UNTRACKED], "print some statistics about AST and HIR"), - mir_stats: bool = (false, parse_bool, [UNTRACKED], - "print some statistics about MIR"), always_encode_mir: bool = (false, parse_bool, [TRACKED], "encode MIR of all functions into the crate metadata"), osx_rpath_install_name: bool = (false, parse_bool, [TRACKED], @@ -2083,7 +2078,7 @@ pub fn build_session_options_and_crate_config( error_format, &format!( "optimization level needs to be \ - between 0-3 (instead was `{}`)", + between 0-3, s or z (instead was `{}`)", arg ), ); @@ -2203,8 +2198,7 @@ pub fn build_session_options_and_crate_config( if !cg.remark.is_empty() && debuginfo == DebugInfo::None { early_warn( error_format, - "-C remark will not show source locations without \ - --debuginfo", + "-C remark requires \"-C debuginfo=n\" to show source locations", ); } @@ -2379,11 +2373,11 @@ impl fmt::Display for CrateType { } } -/// Commandline arguments passed to the compiler have to be incorporated with +/// Command-line arguments passed to the compiler have to be incorporated with /// the dependency tracking system for incremental compilation. This module /// provides some utilities to make this more convenient. 
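Tying together the `BTreeMap` warnings on `OutputTypes`/`Externs` above and the `dep_tracking` module doc that follows: the deterministic fingerprint of the command-line options only works because a `BTreeMap` iterates its entries in key order. A minimal, self-contained sketch of that property (illustrative only; `fingerprint` is a hypothetical helper, not rustc's actual hashing code):

```
use std::collections::BTreeMap;
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Hash the entries in iteration order. With a BTreeMap that order is the
// sorted key order, so the same set of flags always yields the same value,
// which is what incremental compilation's dependency tracking relies on.
fn fingerprint(opts: &BTreeMap<String, String>) -> u64 {
    let mut hasher = DefaultHasher::new();
    for (key, value) in opts {
        key.hash(&mut hasher);
        value.hash(&mut hasher);
    }
    hasher.finish()
}

fn main() {
    let mut a = BTreeMap::new();
    a.insert("opt-level".to_string(), "2".to_string());
    a.insert("debuginfo".to_string(), "0".to_string());

    let mut b = BTreeMap::new();
    // Same entries inserted in the opposite order...
    b.insert("debuginfo".to_string(), "0".to_string());
    b.insert("opt-level".to_string(), "2".to_string());

    // ...still produce the same fingerprint. A HashMap would not guarantee this.
    assert_eq!(fingerprint(&a), fingerprint(&b));
}
```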
/// -/// The values of all commandline arguments that are relevant for dependency +/// The values of all command-line arguments that are relevant for dependency /// tracking are hashed into a single value that determines whether the /// incremental compilation cache can be re-used or not. This hashing is done /// via the DepTrackingHash trait defined below, since the standard Hash @@ -2396,7 +2390,7 @@ impl fmt::Display for CrateType { /// impl_dep_tracking_hash_via_hash!() macro that allows to simply reuse the /// Hash implementation for DepTrackingHash. It's important though that /// we have an opt-in scheme here, so one is hopefully forced to think about -/// how the hash should be calculated when adding a new commandline argument. +/// how the hash should be calculated when adding a new command-line argument. mod dep_tracking { use lint; use middle::cstore; diff --git a/src/librustc/session/mod.rs b/src/librustc/session/mod.rs index d5513080da..d1c3acc57b 100644 --- a/src/librustc/session/mod.rs +++ b/src/librustc/session/mod.rs @@ -68,7 +68,7 @@ pub struct Session { /// For a library crate, this is always none pub entry_fn: Once>, pub plugin_registrar_fn: Once>, - pub derive_registrar_fn: Once>, + pub proc_macro_decls_static: Once>, pub default_sysroot: Option, /// The name of the root source file of the crate, in the local file system. /// `None` means that there is no source file. @@ -112,7 +112,6 @@ pub struct Session { /// The metadata::creader module may inject an allocator/panic_runtime /// dependency if it didn't already find one, and this tracks what was /// injected. - pub injected_allocator: Once>, pub allocator_kind: Once>, pub injected_panic_runtime: Once>, @@ -394,7 +393,7 @@ impl Session { match id.as_usize().checked_add(count) { Some(next) => { - self.next_node_id.set(ast::NodeId::new(next)); + self.next_node_id.set(ast::NodeId::from_usize(next)); } None => bug!("Input too large, ran out of node ids!"), } @@ -688,9 +687,9 @@ impl Session { ) } - pub fn generate_derive_registrar_symbol(&self, disambiguator: CrateDisambiguator) -> String { + pub fn generate_proc_macro_decls_symbol(&self, disambiguator: CrateDisambiguator) -> String { format!( - "__rustc_derive_registrar_{}__", + "__rustc_proc_macro_decls_{}__", disambiguator.to_fingerprint().to_hex() ) } @@ -827,8 +826,10 @@ impl Session { } pub fn profiler ()>(&self, f: F) { - let mut profiler = self.self_profiling.borrow_mut(); - f(&mut profiler); + if self.opts.debugging_opts.self_profile { + let mut profiler = self.self_profiling.borrow_mut(); + f(&mut profiler); + } } pub fn print_profiler_results(&self) { @@ -868,7 +869,7 @@ impl Session { let fuel = self.optimization_fuel_limit.get(); ret = fuel != 0; if fuel == 0 && !self.out_of_fuel.get() { - println!("optimization-fuel-exhausted: {}", msg()); + eprintln!("optimization-fuel-exhausted: {}", msg()); self.out_of_fuel.set(true); } else if fuel > 0 { self.optimization_fuel_limit.set(fuel - 1); @@ -1145,12 +1146,12 @@ pub fn build_session_( // For a library crate, this is always none entry_fn: Once::new(), plugin_registrar_fn: Once::new(), - derive_registrar_fn: Once::new(), + proc_macro_decls_static: Once::new(), default_sysroot, local_crate_source_file, working_dir, lint_store: RwLock::new(lint::LintStore::new()), - buffered_lints: Lock::new(Some(lint::LintBuffer::new())), + buffered_lints: Lock::new(Some(Default::default())), one_time_diagnostics: Default::default(), plugin_llvm_passes: OneThread::new(RefCell::new(Vec::new())), plugin_attributes: 
OneThread::new(RefCell::new(Vec::new())), @@ -1161,8 +1162,7 @@ pub fn build_session_( recursion_limit: Once::new(), type_length_limit: Once::new(), const_eval_stack_frame_limit: 100, - next_node_id: OneThread::new(Cell::new(NodeId::new(1))), - injected_allocator: Once::new(), + next_node_id: OneThread::new(Cell::new(NodeId::from_u32(1))), allocator_kind: Once::new(), injected_panic_runtime: Once::new(), imported_macro_spans: OneThread::new(RefCell::new(FxHashMap::default())), diff --git a/src/librustc/traits/auto_trait.rs b/src/librustc/traits/auto_trait.rs index 50ca6ca78a..c3cca149c2 100644 --- a/src/librustc/traits/auto_trait.rs +++ b/src/librustc/traits/auto_trait.rs @@ -309,7 +309,7 @@ impl<'a, 'tcx> AutoTraitFinder<'a, 'tcx> { ) -> Option<(ty::ParamEnv<'c>, ty::ParamEnv<'c>)> { let tcx = infcx.tcx; - let mut select = SelectionContext::new(&infcx); + let mut select = SelectionContext::with_negative(&infcx, true); let mut already_visited = FxHashSet::default(); let mut predicates = VecDeque::new(); @@ -338,6 +338,21 @@ impl<'a, 'tcx> AutoTraitFinder<'a, 'tcx> { match &result { &Ok(Some(ref vtable)) => { + // If we see an explicit negative impl (e.g. 'impl !Send for MyStruct'), + // we immediately bail out, since it's impossible for us to continue. + match vtable { + Vtable::VtableImpl(VtableImplData { impl_def_id, .. }) => { + // Blame tidy for the weird bracket placement + if infcx.tcx.impl_polarity(*impl_def_id) == hir::ImplPolarity::Negative + { + debug!("evaluate_nested_obligations: Found explicit negative impl\ + {:?}, bailing out", impl_def_id); + return None; + } + }, + _ => {} + } + let obligations = vtable.clone().nested_obligations().into_iter(); if !self.evaluate_nested_obligations( @@ -447,27 +462,51 @@ impl<'a, 'tcx> AutoTraitFinder<'a, 'tcx> { ty::RegionKind::ReLateBound(_, _), ) => {} - (ty::RegionKind::ReLateBound(_, _), _) => { + (ty::RegionKind::ReLateBound(_, _), _) | + (_, ty::RegionKind::ReVar(_)) => { + // One of these is true: // The new predicate has a HRTB in a spot where the old // predicate does not (if they both had a HRTB, the previous - // match arm would have executed). + // match arm would have executed). An HRTB is a 'stricter' + // bound than anything else, so we want to keep the newer + // predicate (with the HRTB) in place of the old predicate. + // - // The means we want to remove the older predicate from - // user_computed_preds, since having both it and the new + // OR + // + // The old predicate has a region variable where the new + // predicate has some other kind of region. A region + // variable isn't something we can actually display to a user, + // so we choose the new predicate (which doesn't have a region + // variable). + // + // In both cases, we want to remove the old predicate + // from user_computed_preds, and replace it with the new + // one. Having both the old and the new // predicate in a ParamEnv would confuse SelectionContext + // // We're currently in the predicate passed to 'retain', // so we return 'false' to remove the old predicate from // user_computed_preds return false; } - (_, ty::RegionKind::ReLateBound(_, _)) => { - // This is the opposite situation as the previous arm - the - // old predicate has a HRTB lifetime in a place where the - // new predicate does not. We want to leave the old + (_, ty::RegionKind::ReLateBound(_, _)) | + (ty::RegionKind::ReVar(_), _) => { + // This is the opposite situation as the previous arm.
+ // One of these is true: + // + // The old predicate has a HRTB lifetime in a place where the + // new predicate does not. + // + // OR + // + // The new predicate has a region variable where the old + // predicate has some other type of region. + // + // We want to leave the old // predicate in user_computed_preds, and skip adding // new_pred to user_computed_params. should_add_new = false - } + }, _ => {} } } @@ -683,8 +722,8 @@ impl<'a, 'tcx> AutoTraitFinder<'a, 'tcx> { } &ty::Predicate::TypeOutlives(ref binder) => { match ( - binder.no_late_bound_regions(), - binder.map_bound_ref(|pred| pred.0).no_late_bound_regions(), + binder.no_bound_vars(), + binder.map_bound_ref(|pred| pred.0).no_bound_vars(), ) { (None, Some(t_a)) => { select.infcx().register_region_obligation_with_cause( diff --git a/src/librustc/traits/coherence.rs b/src/librustc/traits/coherence.rs index 817e9ffcbb..4bf8ba0d6d 100644 --- a/src/librustc/traits/coherence.rs +++ b/src/librustc/traits/coherence.rs @@ -11,8 +11,8 @@ //! See rustc guide chapters on [trait-resolution] and [trait-specialization] for more info on how //! this works. //! -//! [trait-resolution]: https://rust-lang-nursery.github.io/rustc-guide/traits/resolution.html -//! [trait-specialization]: https://rust-lang-nursery.github.io/rustc-guide/traits/specialization.html +//! [trait-resolution]: https://rust-lang.github.io/rustc-guide/traits/resolution.html +//! [trait-specialization]: https://rust-lang.github.io/rustc-guide/traits/specialization.html use hir::def_id::{DefId, LOCAL_CRATE}; use syntax_pos::DUMMY_SP; @@ -455,7 +455,7 @@ fn ty_is_local_constructor(ty: Ty<'_>, in_crate: InCrate) -> bool { false } - ty::Infer(..) => match in_crate { + ty::Placeholder(..) | ty::Bound(..) | ty::Infer(..) => match in_crate { InCrate::Local => false, // The inference variable might be unified with a local // type in that remote crate. diff --git a/src/librustc/traits/error_reporting.rs b/src/librustc/traits/error_reporting.rs index b6df8ebe90..7e97dc3c84 100644 --- a/src/librustc/traits/error_reporting.rs +++ b/src/librustc/traits/error_reporting.rs @@ -34,7 +34,6 @@ use hir::def_id::DefId; use infer::{self, InferCtxt}; use infer::type_variable::TypeVariableOrigin; use std::fmt; -use std::iter; use syntax::ast; use session::DiagnosticMessageId; use ty::{self, AdtKind, ToPredicate, ToPolyTraitRef, Ty, TyCtxt, TypeFoldable}; @@ -213,10 +212,11 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { // cause I have no idea for a good error message. if let ty::Predicate::Projection(ref data) = predicate { let mut selcx = SelectionContext::new(self); - let (data, _) = self.replace_late_bound_regions_with_fresh_var( + let (data, _) = self.replace_bound_vars_with_fresh_vars( obligation.cause.span, infer::LateBoundRegionConversionTime::HigherRankedType, - data); + data + ); let mut obligations = vec![]; let normalized_ty = super::normalize_projection_type( &mut selcx, @@ -281,7 +281,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { ty::Generator(..) => Some(18), ty::Foreign(..) => Some(19), ty::GeneratorWitness(..) => Some(20), - ty::Infer(..) | ty::Error => None, + ty::Placeholder(..) | ty::Bound(..) | ty::Infer(..) | ty::Error => None, ty::UnnormalizedProjection(..) 
=> bug!("only used with chalk-engine"), } } @@ -429,7 +429,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { )); let tcx = self.tcx; if let Some(len) = len.val.try_to_scalar().and_then(|scalar| { - scalar.to_usize(tcx).ok() + scalar.to_usize(&tcx).ok() }) { flags.push(( "_Self".to_owned(), @@ -755,7 +755,8 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { } ty::Predicate::ObjectSafe(trait_def_id) => { - let violations = self.tcx.object_safety_violations(trait_def_id); + let violations = self.tcx.global_tcx() + .object_safety_violations(trait_def_id); self.tcx.report_object_safety_error(span, trait_def_id, violations) @@ -876,22 +877,14 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { } TraitNotObjectSafe(did) => { - let violations = self.tcx.object_safety_violations(did); + let violations = self.tcx.global_tcx().object_safety_violations(did); self.tcx.report_object_safety_error(span, did, violations) } - ConstEvalFailure(ref err) => { - match err.struct_error( - self.tcx.at(span), - "could not evaluate constant expression", - ) { - Some(err) => err, - None => { - self.tcx.sess.delay_span_bug(span, - &format!("constant in type had an ignored error: {:?}", err)); - return; - } - } + // already reported in the query + ConstEvalFailure(_) => { + self.tcx.sess.delay_span_bug(span, "constant in type had an ignored error"); + return; } Overflow => { @@ -1099,16 +1092,27 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { if let Some(found_span) = found_span { err.span_label(found_span, format!("takes {}", found_str)); + // move |_| { ... } + // ^^^^^^^^-- def_span + // + // move |_| { ... } + // ^^^^^-- prefix + let prefix_span = self.tcx.sess.source_map().span_until_non_whitespace(found_span); + // move |_| { ... } + // ^^^-- pipe_span + let pipe_span = if let Some(span) = found_span.trim_start(prefix_span) { + span + } else { + found_span + }; + // Suggest to take and ignore the arguments with expected_args_length `_`s if // found arguments is empty (assume the user just wants to ignore args in this case). // For example, if `expected_args_length` is 2, suggest `|_, _|`. if found_args.is_empty() && is_closure { - let underscores = iter::repeat("_") - .take(expected_args.len()) - .collect::>() - .join(", "); + let underscores = vec!["_"; expected_args.len()].join(", "); err.span_suggestion_with_applicability( - found_span, + pipe_span, &format!( "consider changing the closure to take and ignore the expected argument{}", if expected_args.len() < 2 { diff --git a/src/librustc/traits/fulfill.rs b/src/librustc/traits/fulfill.rs index e6bf02cd73..bc091a4e7e 100644 --- a/src/librustc/traits/fulfill.rs +++ b/src/librustc/traits/fulfill.rs @@ -9,15 +9,14 @@ // except according to those terms. 
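Referring back to the `error_reporting.rs` hunk above: the closure-arity suggestion now targets only the closure's `|...|` argument list (the new `pipe_span`) instead of the whole closure span, and builds its replacement text with `vec!["_"; expected_args.len()].join(", ")`. A standalone sketch of the string being constructed (illustrative only; `ignored_args_suggestion` is a hypothetical helper, and the surrounding pipes stand in for the text inserted at `pipe_span`):

```
fn ignored_args_suggestion(expected_args: usize) -> String {
    // Same construction as in the diff: one `_` per expected argument.
    let underscores = vec!["_"; expected_args].join(", ");
    format!("|{}|", underscores)
}

fn main() {
    // A closure written as `|x|` where two arguments are expected gets a
    // suggestion to rewrite its argument list as `|_, _|`.
    assert_eq!(ignored_args_suggestion(2), "|_, _|");
    assert_eq!(ignored_args_suggestion(1), "|_|");
    assert_eq!(ignored_args_suggestion(0), "||");
}
```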
use infer::InferCtxt; -use mir::interpret::GlobalId; +use mir::interpret::{GlobalId, ErrorHandled}; use ty::{self, Ty, TypeFoldable, ToPolyTraitRef, ToPredicate}; use ty::error::ExpectedFound; -use rustc_data_structures::obligation_forest::{Error, ForestObligation, ObligationForest}; -use rustc_data_structures::obligation_forest::{ObligationProcessor, ProcessResult}; +use rustc_data_structures::obligation_forest::{DoCompleted, Error, ForestObligation}; +use rustc_data_structures::obligation_forest::{ObligationForest, ObligationProcessor}; +use rustc_data_structures::obligation_forest::{ProcessResult}; use std::marker::PhantomData; use hir::def_id::DefId; -use mir::interpret::ConstEvalErr; -use mir::interpret::EvalErrorKind; use super::CodeAmbiguity; use super::CodeProjectionError; @@ -100,7 +99,7 @@ impl<'a, 'gcx, 'tcx> FulfillmentContext<'tcx> { let outcome = self.predicates.process_obligations(&mut FulfillProcessor { selcx, register_region_obligations: self.register_region_obligations - }); + }, DoCompleted::No); debug!("select: outcome={:#?}", outcome); // FIXME: if we kept the original cache key, we could mark projection @@ -145,7 +144,7 @@ impl<'tcx> TraitEngine<'tcx> for FulfillmentContext<'tcx> { debug!("normalize_projection_type(projection_ty={:?})", projection_ty); - debug_assert!(!projection_ty.has_escaping_regions()); + debug_assert!(!projection_ty.has_escaping_bound_vars()); // FIXME(#20304) -- cache @@ -351,15 +350,15 @@ impl<'a, 'b, 'gcx, 'tcx> ObligationProcessor for FulfillProcessor<'a, 'b, 'gcx, } ty::Predicate::TypeOutlives(ref binder) => { - // Check if there are higher-ranked regions. - match binder.no_late_bound_regions() { + // Check if there are higher-ranked vars. + match binder.no_bound_vars() { // If there are, inspect the underlying type further. None => { // Convert from `Binder>` to `Binder`. let binder = binder.map_bound_ref(|pred| pred.0); - // Check if the type has any bound regions. - match binder.no_late_bound_regions() { + // Check if the type has any bound vars. + match binder.no_bound_vars() { // If so, this obligation is an error (for now). Eventually we should be // able to support additional cases here, like `for<'a> &'a str: 'a`. // NOTE: this is duplicate-implemented between here and fulfillment. @@ -495,13 +494,9 @@ impl<'a, 'b, 'gcx, 'tcx> ObligationProcessor for FulfillProcessor<'a, 'b, 'gcx, CodeSelectionError(ConstEvalFailure(err))) } } else { - ProcessResult::Error( - CodeSelectionError(ConstEvalFailure(ConstEvalErr { - span: obligation.cause.span, - error: EvalErrorKind::TooGeneric.into(), - stacktrace: vec![], - }.into())) - ) + ProcessResult::Error(CodeSelectionError( + ConstEvalFailure(ErrorHandled::TooGeneric) + )) } }, None => { diff --git a/src/librustc/traits/mod.rs b/src/librustc/traits/mod.rs index c7e5d89007..ab2fa68ab5 100644 --- a/src/librustc/traits/mod.rs +++ b/src/librustc/traits/mod.rs @@ -10,7 +10,7 @@ //! Trait Resolution. See [rustc guide] for more info on how this works. //! -//! [rustc guide]: https://rust-lang-nursery.github.io/rustc-guide/traits/resolution.html +//! 
[rustc guide]: https://rust-lang.github.io/rustc-guide/traits/resolution.html pub use self::SelectionError::*; pub use self::FulfillmentErrorCode::*; @@ -23,7 +23,7 @@ use hir::def_id::DefId; use infer::SuppressRegionErrors; use infer::outlives::env::OutlivesEnvironment; use middle::region; -use mir::interpret::ConstEvalErr; +use mir::interpret::ErrorHandled; use ty::subst::Substs; use ty::{self, AdtKind, List, Ty, TyCtxt, GenericParamDefKind, ToPredicate}; use ty::error::{ExpectedFound, TypeError}; @@ -50,11 +50,8 @@ pub use self::select::{EvaluationResult, IntercrateAmbiguityCause, OverflowError pub use self::specialize::{OverlapError, specialization_graph, translate_substs}; pub use self::specialize::find_associated_item; pub use self::engine::{TraitEngine, TraitEngineExt}; -pub use self::util::elaborate_predicates; -pub use self::util::supertraits; -pub use self::util::Supertraits; -pub use self::util::supertrait_def_ids; -pub use self::util::SupertraitDefIds; +pub use self::util::{elaborate_predicates, elaborate_trait_ref, elaborate_trait_refs}; +pub use self::util::{supertraits, supertrait_def_ids, Supertraits, SupertraitDefIds}; pub use self::util::transitive_bounds; #[allow(dead_code)] @@ -352,7 +349,7 @@ impl<'tcx> GoalKind<'tcx> { domain_goal: PolyDomainGoal<'tcx>, tcx: TyCtxt<'a, 'tcx, 'tcx>, ) -> GoalKind<'tcx> { - match domain_goal.no_late_bound_regions() { + match domain_goal.no_bound_vars() { Some(p) => p.into_goal(), None => GoalKind::Quantified( QuantifierKind::Universal, @@ -438,7 +435,7 @@ pub enum SelectionError<'tcx> { ty::PolyTraitRef<'tcx>, ty::error::TypeError<'tcx>), TraitNotObjectSafe(DefId), - ConstEvalFailure(Lrc>), + ConstEvalFailure(ErrorHandled), Overflow, } @@ -534,8 +531,11 @@ pub enum Vtable<'tcx, N> { /// Same as above, but for a fn pointer type with the given signature. VtableFnPointer(VtableFnPointerData<'tcx, N>), - /// Vtable automatically generated for a generator + /// Vtable automatically generated for a generator. VtableGenerator(VtableGeneratorData<'tcx, N>), + + /// Vtable for a trait alias. + VtableTraitAlias(VtableTraitAliasData<'tcx, N>), } /// Identifies a particular impl in the source, along with a set of @@ -605,6 +605,13 @@ pub struct VtableFnPointerData<'tcx, N> { pub nested: Vec } +#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable)] +pub struct VtableTraitAliasData<'tcx, N> { + pub alias_def_id: DefId, + pub substs: &'tcx Substs<'tcx>, + pub nested: Vec, +} + /// Creates predicate obligations from the generic bounds. 
pub fn predicates_for_generics<'tcx>(cause: ObligationCause<'tcx>, param_env: ty::ParamEnv<'tcx>, @@ -1045,6 +1052,7 @@ impl<'tcx,O> Obligation<'tcx,O> { } impl<'tcx> ObligationCause<'tcx> { + #[inline] pub fn new(span: Span, body_id: ast::NodeId, code: ObligationCauseCode<'tcx>) @@ -1072,6 +1080,7 @@ impl<'tcx, N> Vtable<'tcx, N> { VtableGenerator(c) => c.nested, VtableObject(d) => d.nested, VtableFnPointer(d) => d.nested, + VtableTraitAlias(d) => d.nested, } } @@ -1095,20 +1104,25 @@ impl<'tcx, N> Vtable<'tcx, N> { trait_def_id: d.trait_def_id, nested: d.nested.into_iter().map(f).collect(), }), - VtableFnPointer(p) => VtableFnPointer(VtableFnPointerData { - fn_ty: p.fn_ty, - nested: p.nested.into_iter().map(f).collect(), + VtableClosure(c) => VtableClosure(VtableClosureData { + closure_def_id: c.closure_def_id, + substs: c.substs, + nested: c.nested.into_iter().map(f).collect(), }), VtableGenerator(c) => VtableGenerator(VtableGeneratorData { generator_def_id: c.generator_def_id, substs: c.substs, nested: c.nested.into_iter().map(f).collect(), }), - VtableClosure(c) => VtableClosure(VtableClosureData { - closure_def_id: c.closure_def_id, - substs: c.substs, - nested: c.nested.into_iter().map(f).collect(), - }) + VtableFnPointer(p) => VtableFnPointer(VtableFnPointerData { + fn_ty: p.fn_ty, + nested: p.nested.into_iter().map(f).collect(), + }), + VtableTraitAlias(d) => VtableTraitAlias(VtableTraitAliasData { + alias_def_id: d.alias_def_id, + substs: d.substs, + nested: d.nested.into_iter().map(f).collect(), + }), } } } diff --git a/src/librustc/traits/object_safety.rs b/src/librustc/traits/object_safety.rs index d5942e738f..2909daf22b 100644 --- a/src/librustc/traits/object_safety.rs +++ b/src/librustc/traits/object_safety.rs @@ -13,7 +13,8 @@ //! object if all of their methods meet certain criteria. In particular, //! they must: //! -//! - have a suitable receiver from which we can extract a vtable; +//! - have a suitable receiver from which we can extract a vtable and coerce to a "thin" version +//! that doesn't contain the vtable; //! - not reference the erased type `Self` except for in this receiver; //! - not have generic type parameters @@ -21,11 +22,12 @@ use super::elaborate_predicates; use hir::def_id::DefId; use lint; -use traits; -use ty::{self, Ty, TyCtxt, TypeFoldable}; -use ty::util::ExplicitSelf; +use traits::{self, Obligation, ObligationCause}; +use ty::{self, Ty, TyCtxt, TypeFoldable, Predicate, ToPredicate}; +use ty::subst::{Subst, Substs}; use std::borrow::Cow; -use syntax::ast; +use std::iter::{self}; +use syntax::ast::{self, Name}; use syntax_pos::Span; #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] @@ -62,8 +64,8 @@ impl ObjectSafetyViolation { format!("method `{}` references the `Self` type in where clauses", name).into(), ObjectSafetyViolation::Method(name, MethodViolationCode::Generic) => format!("method `{}` has generic type parameters", name).into(), - ObjectSafetyViolation::Method(name, MethodViolationCode::NonStandardSelfType) => - format!("method `{}` has a non-standard `self` type", name).into(), + ObjectSafetyViolation::Method(name, MethodViolationCode::UndispatchableReceiver) => + format!("method `{}`'s receiver cannot be dispatched on", name).into(), ObjectSafetyViolation::AssociatedConst(name) => format!("the trait cannot contain associated consts like `{}`", name).into(), } @@ -85,11 +87,11 @@ pub enum MethodViolationCode { /// e.g., `fn foo()` Generic, - /// arbitrary `self` type, e.g. 
`self: Rc` - NonStandardSelfType, + /// the method's receiver (`self` argument) can't be dispatched on + UndispatchableReceiver, } -impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { +impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> { /// Returns the object safety violations that affect /// astconv - currently, Self in supertraits. This is needed @@ -113,6 +115,8 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { pub fn object_safety_violations(self, trait_def_id: DefId) -> Vec { + debug!("object_safety_violations: {:?}", trait_def_id); + traits::supertrait_def_ids(self, trait_def_id) .flat_map(|def_id| self.object_safety_violations_for_trait(def_id)) .collect() @@ -178,7 +182,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { }; predicates .predicates - .into_iter() + .iter() .map(|(predicate, _)| predicate.subst_supertrait(self, &trait_ref)) .any(|predicate| { match predicate { @@ -277,23 +281,13 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { method: &ty::AssociatedItem) -> Option { - // The method's first parameter must be something that derefs (or - // autorefs) to `&self`. For now, we only accept `self`, `&self` - // and `Box`. + // The method's first parameter must be named `self` if !method.method_has_self_argument { return Some(MethodViolationCode::StaticMethod); } let sig = self.fn_sig(method.def_id); - let self_ty = self.mk_self_type(); - let self_arg_ty = sig.skip_binder().inputs()[0]; - if let ExplicitSelf::Other = ExplicitSelf::determine(self_arg_ty, |ty| ty == self_ty) { - return Some(MethodViolationCode::NonStandardSelfType); - } - - // The `Self` type is erased, so it should not appear in list of - // arguments or return type apart from the receiver. for input_ty in &sig.skip_binder().inputs()[1..] { if self.contains_illegal_self_type_reference(trait_def_id, input_ty) { return Some(MethodViolationCode::ReferencesSelf); @@ -308,9 +302,10 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { return Some(MethodViolationCode::Generic); } - if self.predicates_of(method.def_id).predicates.into_iter() + if self.predicates_of(method.def_id).predicates.iter() // A trait object can't claim to live more than the concrete type, // so outlives predicates will always hold. + .cloned() .filter(|(p, _)| p.to_opt_type_outlives().is_none()) .collect::>() // Do a shallow visit so that `contains_illegal_self_type_reference` @@ -320,9 +315,254 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { return Some(MethodViolationCode::WhereClauseReferencesSelf(span)); } + let receiver_ty = self.liberate_late_bound_regions( + method.def_id, + &sig.map_bound(|sig| sig.inputs()[0]), + ); + + // until `unsized_locals` is fully implemented, `self: Self` can't be dispatched on. + // However, this is already considered object-safe. We allow it as a special case here. + // FIXME(mikeyhew) get rid of this `if` statement once `receiver_is_dispatchable` allows + // `Receiver: Unsize dyn Trait]>` + if receiver_ty != self.mk_self_type() { + if !self.receiver_is_dispatchable(method, receiver_ty) { + return Some(MethodViolationCode::UndispatchableReceiver); + } else { + // sanity check to make sure the receiver actually has the layout of a pointer + + use ty::layout::Abi; + + let param_env = self.param_env(method.def_id); + + let abi_of_ty = |ty: Ty<'tcx>| -> &Abi { + match self.layout_of(param_env.and(ty)) { + Ok(layout) => &layout.abi, + Err(err) => bug!( + "Error: {}\n while computing layout for type {:?}", err, ty + ) + } + }; + + // e.g. 
Rc<()> + let unit_receiver_ty = self.receiver_for_self_ty( + receiver_ty, self.mk_unit(), method.def_id + ); + + match abi_of_ty(unit_receiver_ty) { + &Abi::Scalar(..) => (), + abi => bug!("Receiver when Self = () should have a Scalar ABI, found {:?}", abi) + } + + let trait_object_ty = self.object_ty_for_trait( + trait_def_id, self.mk_region(ty::ReStatic) + ); + + // e.g. Rc + let trait_object_receiver = self.receiver_for_self_ty( + receiver_ty, trait_object_ty, method.def_id + ); + + match abi_of_ty(trait_object_receiver) { + &Abi::ScalarPair(..) => (), + abi => bug!( + "Receiver when Self = {} should have a ScalarPair ABI, found {:?}", + trait_object_ty, abi + ) + } + } + } + None } + /// performs a type substitution to produce the version of receiver_ty when `Self = self_ty` + /// e.g. for receiver_ty = `Rc` and self_ty = `Foo`, returns `Rc` + fn receiver_for_self_ty( + self, receiver_ty: Ty<'tcx>, self_ty: Ty<'tcx>, method_def_id: DefId + ) -> Ty<'tcx> { + let substs = Substs::for_item(self, method_def_id, |param, _| { + if param.index == 0 { + self_ty.into() + } else { + self.mk_param_from_def(param) + } + }); + + receiver_ty.subst(self, substs) + } + + /// creates the object type for the current trait. For example, + /// if the current trait is `Deref`, then this will be + /// `dyn Deref + 'static` + fn object_ty_for_trait(self, trait_def_id: DefId, lifetime: ty::Region<'tcx>) -> Ty<'tcx> { + debug!("object_ty_for_trait: trait_def_id={:?}", trait_def_id); + + let trait_ref = ty::TraitRef::identity(self, trait_def_id); + + let trait_predicate = ty::ExistentialPredicate::Trait( + ty::ExistentialTraitRef::erase_self_ty(self, trait_ref) + ); + + let mut associated_types = traits::supertraits(self, ty::Binder::dummy(trait_ref)) + .flat_map(|trait_ref| self.associated_items(trait_ref.def_id())) + .filter(|item| item.kind == ty::AssociatedKind::Type) + .collect::>(); + + // existential predicates need to be in a specific order + associated_types.sort_by_cached_key(|item| self.def_path_hash(item.def_id)); + + let projection_predicates = associated_types.into_iter().map(|item| { + ty::ExistentialPredicate::Projection(ty::ExistentialProjection { + ty: self.mk_projection(item.def_id, trait_ref.substs), + item_def_id: item.def_id, + substs: trait_ref.substs, + }) + }); + + let existential_predicates = self.mk_existential_predicates( + iter::once(trait_predicate).chain(projection_predicates) + ); + + let object_ty = self.mk_dynamic( + ty::Binder::dummy(existential_predicates), + lifetime, + ); + + debug!("object_ty_for_trait: object_ty=`{}`", object_ty); + + object_ty + } + + /// checks the method's receiver (the `self` argument) can be dispatched on when `Self` is a + /// trait object. We require that `DispatchableFromDyn` be implemented for the receiver type + /// in the following way: + /// - let `Receiver` be the type of the `self` argument, i.e `Self`, `&Self`, `Rc` + /// - require the following bound: + /// + /// Receiver[Self => T]: DispatchFromDyn dyn Trait]> + /// + /// where `Foo[X => Y]` means "the same type as `Foo`, but with `X` replaced with `Y`" + /// (substitution notation). 
+ /// + /// some examples of receiver types and their required obligation + /// - `&'a mut self` requires `&'a mut Self: DispatchFromDyn<&'a mut dyn Trait>` + /// - `self: Rc<Self>` requires `Rc<Self>: DispatchFromDyn<Rc<dyn Trait>>` + /// - `self: Pin<Box<Self>>` requires `Pin<Box<Self>>: DispatchFromDyn<Pin<Box<dyn Trait>>>` + /// + /// The only case where the receiver is not dispatchable, but is still a valid receiver + /// type (just not object-safe), is when there is more than one level of pointer indirection. + /// e.g. `self: &&Self`, `self: &Rc<Self>`, `self: Box<Box<Self>>`. In these cases, there + /// is no way, or at least no inexpensive way, to coerce the receiver from the version where + /// `Self = dyn Trait` to the version where `Self = T`, where `T` is the unknown erased type + /// contained by the trait object, because the object that needs to be coerced is behind + /// a pointer. + /// + /// In practice, we cannot use `dyn Trait` explicitly in the obligation because it would result + /// in a new check that `Trait` is object safe, creating a cycle. So instead, we fudge a little + /// by introducing a new type parameter `U` such that `Self: Unsize<U>` and `U: Trait + ?Sized`, + /// and use `U` in place of `dyn Trait`. Written as a chalk-style query: + /// + /// forall (U: Trait + ?Sized) { + /// if (Self: Unsize<U>) { + /// Receiver: DispatchFromDyn<Receiver[Self => U]> + /// } + /// } + /// + /// for `self: &'a mut Self`, this means `&'a mut Self: DispatchFromDyn<&'a mut U>` + /// for `self: Rc<Self>`, this means `Rc<Self>: DispatchFromDyn<Rc<U>>` + /// for `self: Pin<Box<Self>>`, this means `Pin<Box<Self>>: DispatchFromDyn<Pin<Box<U>>>` + // + // FIXME(mikeyhew) when unsized receivers are implemented as part of unsized rvalues, add this + // fallback query: `Receiver: Unsize<Receiver[Self => U]>` to support receivers like + // `self: Wrapper<Self>`. + #[allow(dead_code)] + fn receiver_is_dispatchable( + self, + method: &ty::AssociatedItem, + receiver_ty: Ty<'tcx>, + ) -> bool { + debug!("receiver_is_dispatchable: method = {:?}, receiver_ty = {:?}", method, receiver_ty); + + let traits = (self.lang_items().unsize_trait(), + self.lang_items().dispatch_from_dyn_trait()); + let (unsize_did, dispatch_from_dyn_did) = if let (Some(u), Some(cu)) = traits { + (u, cu) + } else { + debug!("receiver_is_dispatchable: Missing Unsize or DispatchFromDyn traits"); + return false; + }; + + // the type `U` in the query + // use a bogus type parameter to mimic a forall(U) query using u32::MAX for now.
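To make the new `UndispatchableReceiver` check above concrete at the language level, here is an illustrative example (not part of the patch; `Greet` and `World` are made-up names). A receiver with a single level of indirection such as `Rc<Self>` can be dispatched on through a trait object, while a receiver behind two pointers (e.g. `self: &Rc<Self>`, which only the unstable arbitrary-self-types work would accept as a receiver at all) is exactly what this check rejects as non-object-safe.

```
use std::rc::Rc;

trait Greet {
    // `Rc<Self>` is a dispatchable receiver: `Rc<World>` coerces to `Rc<dyn Greet>`.
    fn hello(self: Rc<Self>) -> String;
}

struct World;

impl Greet for World {
    fn hello(self: Rc<Self>) -> String {
        "hello world".to_string()
    }
}

fn main() {
    // `Greet` is object-safe, so dynamic dispatch through `Rc<dyn Greet>` works.
    let obj: Rc<dyn Greet> = Rc::new(World);
    assert_eq!(obj.hello(), "hello world");
}
```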
+ // FIXME(mikeyhew) this is a total hack, and we should replace it when real forall queries + // are implemented + let unsized_self_ty: Ty<'tcx> = self.mk_ty_param( + ::std::u32::MAX, + Name::intern("RustaceansAreAwesome").as_interned_str(), + ); + + // `Receiver[Self => U]` + let unsized_receiver_ty = self.receiver_for_self_ty( + receiver_ty, unsized_self_ty, method.def_id + ); + + // create a modified param env, with `Self: Unsize` and `U: Trait` added to caller bounds + // `U: ?Sized` is already implied here + let param_env = { + let mut param_env = self.param_env(method.def_id); + + // Self: Unsize + let unsize_predicate = ty::TraitRef { + def_id: unsize_did, + substs: self.mk_substs_trait(self.mk_self_type(), &[unsized_self_ty.into()]), + }.to_predicate(); + + // U: Trait + let trait_predicate = { + let substs = Substs::for_item(self, method.container.assert_trait(), |param, _| { + if param.index == 0 { + unsized_self_ty.into() + } else { + self.mk_param_from_def(param) + } + }); + + ty::TraitRef { + def_id: unsize_did, + substs, + }.to_predicate() + }; + + let caller_bounds: Vec> = param_env.caller_bounds.iter().cloned() + .chain(iter::once(unsize_predicate)) + .chain(iter::once(trait_predicate)) + .collect(); + + param_env.caller_bounds = self.intern_predicates(&caller_bounds); + + param_env + }; + + // Receiver: DispatchFromDyn U]> + let obligation = { + let predicate = ty::TraitRef { + def_id: dispatch_from_dyn_did, + substs: self.mk_substs_trait(receiver_ty, &[unsized_receiver_ty.into()]), + }.to_predicate(); + + Obligation::new( + ObligationCause::dummy(), + param_env, + predicate, + ) + }; + + self.infer_ctxt().enter(|ref infcx| { + // the receiver is dispatchable iff the obligation holds + infcx.predicate_must_hold(&obligation) + }) + } + fn contains_illegal_self_type_reference(self, trait_def_id: DefId, ty: Ty<'tcx>) diff --git a/src/librustc/traits/project.rs b/src/librustc/traits/project.rs index 2262516d37..1d3d66e82f 100644 --- a/src/librustc/traits/project.rs +++ b/src/librustc/traits/project.rs @@ -19,10 +19,7 @@ use super::PredicateObligation; use super::Selection; use super::SelectionContext; use super::SelectionError; -use super::VtableClosureData; -use super::VtableGeneratorData; -use super::VtableFnPointerData; -use super::VtableImplData; +use super::{VtableImplData, VtableClosureData, VtableGeneratorData, VtableFnPointerData}; use super::util; use hir::def_id::DefId; @@ -207,7 +204,7 @@ pub fn poly_project_and_unify_type<'cx, 'gcx, 'tcx>( let infcx = selcx.infcx(); infcx.commit_if_ok(|snapshot| { let (placeholder_predicate, placeholder_map) = - infcx.replace_late_bound_regions_with_placeholders(&obligation.predicate); + infcx.replace_bound_vars_with_placeholders(&obligation.predicate); let skol_obligation = obligation.with(placeholder_predicate); let r = match project_and_unify_type(selcx, &skol_obligation) { @@ -269,7 +266,7 @@ fn project_and_unify_type<'cx, 'gcx, 'tcx>( }, Err(err) => { debug!("project_and_unify_type: equating types encountered error {:?}", err); - Err(MismatchedProjectionTypes { err: err }) + Err(MismatchedProjectionTypes { err }) } } } @@ -366,7 +363,7 @@ impl<'a, 'b, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for AssociatedTypeNormalizer<'a, let ty = ty.super_fold_with(self); match ty.sty { - ty::Opaque(def_id, substs) if !substs.has_escaping_regions() => { // (*) + ty::Opaque(def_id, substs) if !substs.has_escaping_bound_vars() => { // (*) // Only normalize `impl Trait` after type-checking, usually in codegen. 
match self.param_env.reveal { Reveal::UserFacing => ty, @@ -393,7 +390,7 @@ impl<'a, 'b, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for AssociatedTypeNormalizer<'a, } } - ty::Projection(ref data) if !data.has_escaping_regions() => { // (*) + ty::Projection(ref data) if !data.has_escaping_bound_vars() => { // (*) // (*) This is kind of hacky -- we need to be able to // handle normalization within binders because @@ -427,7 +424,7 @@ impl<'a, 'b, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for AssociatedTypeNormalizer<'a, if let ConstValue::Unevaluated(def_id, substs) = constant.val { let tcx = self.selcx.tcx().global_tcx(); if let Some(param_env) = self.tcx().lift_to_global(&self.param_env) { - if substs.needs_infer() || substs.has_skol() { + if substs.needs_infer() || substs.has_placeholders() { let identity_substs = Substs::identity_for_item(tcx, def_id); let instance = ty::Instance::resolve(tcx, param_env, def_id, identity_substs); if let Some(instance) = instance { @@ -1073,7 +1070,8 @@ fn assemble_candidates_from_impls<'cx, 'gcx, 'tcx>( super::VtableClosure(_) | super::VtableGenerator(_) | super::VtableFnPointer(_) | - super::VtableObject(_) => { + super::VtableObject(_) | + super::VtableTraitAlias(_) => { debug!("assemble_candidates_from_impls: vtable={:?}", vtable); true @@ -1235,7 +1233,8 @@ fn confirm_select_candidate<'cx, 'gcx, 'tcx>( confirm_object_candidate(selcx, obligation, obligation_trait_ref), super::VtableAutoImpl(..) | super::VtableParam(..) | - super::VtableBuiltin(..) => + super::VtableBuiltin(..) | + super::VtableTraitAlias(..) => // we don't create Select candidates with this kind of resolution span_bug!( obligation.cause.span, @@ -1486,7 +1485,7 @@ fn confirm_impl_candidate<'cx, 'gcx, 'tcx>( impl_vtable: VtableImplData<'tcx, PredicateObligation<'tcx>>) -> Progress<'tcx> { - let VtableImplData { substs, nested, impl_def_id } = impl_vtable; + let VtableImplData { impl_def_id, substs, nested } = impl_vtable; let tcx = selcx.tcx(); let param_env = obligation.param_env; @@ -1619,7 +1618,7 @@ impl<'cx, 'gcx, 'tcx> ProjectionCacheKey<'tcx> { let infcx = selcx.infcx(); // We don't do cross-snapshot caching of obligations with escaping regions, // so there's no cache key to use - predicate.no_late_bound_regions() + predicate.no_bound_vars() .map(|predicate| ProjectionCacheKey { // We don't attempt to match up with a specific type-variable state // from a specific call to `opt_normalize_projection_type` - if @@ -1653,15 +1652,15 @@ impl<'tcx> ProjectionCache<'tcx> { } pub fn rollback_to(&mut self, snapshot: ProjectionCacheSnapshot) { - self.map.rollback_to(&snapshot.snapshot); + self.map.rollback_to(snapshot.snapshot); } pub fn rollback_placeholder(&mut self, snapshot: &ProjectionCacheSnapshot) { - self.map.partial_rollback(&snapshot.snapshot, &|k| k.ty.has_re_skol()); + self.map.partial_rollback(&snapshot.snapshot, &|k| k.ty.has_re_placeholders()); } - pub fn commit(&mut self, snapshot: &ProjectionCacheSnapshot) { - self.map.commit(&snapshot.snapshot); + pub fn commit(&mut self, snapshot: ProjectionCacheSnapshot) { + self.map.commit(snapshot.snapshot); } /// Try to start normalize `key`; returns an error if @@ -1715,12 +1714,8 @@ impl<'tcx> ProjectionCache<'tcx> { /// to be a NormalizedTy. pub fn complete_normalized(&mut self, key: ProjectionCacheKey<'tcx>, ty: &NormalizedTy<'tcx>) { // We want to insert `ty` with no obligations. 
If the existing value - // already has no obligations (as is common) we can use `insert_noop` - // to do a minimal amount of work -- the HashMap insertion is skipped, - // and minimal changes are made to the undo log. - if ty.obligations.is_empty() { - self.map.insert_noop(); - } else { + // already has no obligations (as is common) we don't insert anything. + if !ty.obligations.is_empty() { self.map.insert(key, ProjectionCacheEntry::NormalizedTy(Normalized { value: ty.value, obligations: vec![] diff --git a/src/librustc/traits/query/dropck_outlives.rs b/src/librustc/traits/query/dropck_outlives.rs index 8f7b0df8b9..b8bf0fcc15 100644 --- a/src/librustc/traits/query/dropck_outlives.rs +++ b/src/librustc/traits/query/dropck_outlives.rs @@ -200,7 +200,7 @@ impl_stable_hash_for!(struct DtorckConstraint<'tcx> { /// trivial for dropck-outlives. /// /// Note also that `needs_drop` requires a "global" type (i.e., one -/// with erased regions), but this funtcion does not. +/// with erased regions), but this function does not. pub fn trivial_dropck_outlives<'tcx>(tcx: TyCtxt<'_, '_, 'tcx>, ty: Ty<'tcx>) -> bool { match ty.sty { // None of these types have a destructor and hence they do not @@ -251,7 +251,9 @@ pub fn trivial_dropck_outlives<'tcx>(tcx: TyCtxt<'_, '_, 'tcx>, ty: Ty<'tcx>) -> | ty::Projection(..) | ty::Param(_) | ty::Opaque(..) + | ty::Placeholder(..) | ty::Infer(_) + | ty::Bound(..) | ty::Generator(..) => false, ty::UnnormalizedProjection(..) => bug!("only used with chalk-engine"), diff --git a/src/librustc/traits/query/normalize.rs b/src/librustc/traits/query/normalize.rs index 4adb65dc58..91b2ba301c 100644 --- a/src/librustc/traits/query/normalize.rs +++ b/src/librustc/traits/query/normalize.rs @@ -100,7 +100,7 @@ impl<'cx, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for QueryNormalizer<'cx, 'gcx, 'tcx fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> { let ty = ty.super_fold_with(self); match ty.sty { - ty::Opaque(def_id, substs) if !substs.has_escaping_regions() => { + ty::Opaque(def_id, substs) if !substs.has_escaping_bound_vars() => { // (*) // Only normalize `impl Trait` after type-checking, usually in codegen. match self.param_env.reveal { @@ -138,7 +138,7 @@ impl<'cx, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for QueryNormalizer<'cx, 'gcx, 'tcx } } - ty::Projection(ref data) if !data.has_escaping_regions() => { + ty::Projection(ref data) if !data.has_escaping_bound_vars() => { // (*) // (*) This is kind of hacky -- we need to be able to // handle normalization within binders because @@ -202,7 +202,7 @@ impl<'cx, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for QueryNormalizer<'cx, 'gcx, 'tcx if let ConstValue::Unevaluated(def_id, substs) = constant.val { let tcx = self.infcx.tcx.global_tcx(); if let Some(param_env) = self.tcx().lift_to_global(&self.param_env) { - if substs.needs_infer() || substs.has_skol() { + if substs.needs_infer() || substs.has_placeholders() { let identity_substs = Substs::identity_for_item(tcx, def_id); let instance = ty::Instance::resolve(tcx, param_env, def_id, identity_substs); if let Some(instance) = instance { diff --git a/src/librustc/traits/query/outlives_bounds.rs b/src/librustc/traits/query/outlives_bounds.rs index 99f557d44d..b3fae3bab3 100644 --- a/src/librustc/traits/query/outlives_bounds.rs +++ b/src/librustc/traits/query/outlives_bounds.rs @@ -164,7 +164,7 @@ pub fn explicit_outlives_bounds<'tcx>( ty::Predicate::ClosureKind(..) | ty::Predicate::TypeOutlives(..) | ty::Predicate::ConstEvaluatable(..) 
=> None, - ty::Predicate::RegionOutlives(ref data) => data.no_late_bound_regions().map( + ty::Predicate::RegionOutlives(ref data) => data.no_bound_vars().map( |ty::OutlivesPredicate(r_a, r_b)| OutlivesBound::RegionSubRegion(r_b, r_a), ), }) diff --git a/src/librustc/traits/query/type_op/implied_outlives_bounds.rs b/src/librustc/traits/query/type_op/implied_outlives_bounds.rs index b113a322d3..d5233851db 100644 --- a/src/librustc/traits/query/type_op/implied_outlives_bounds.rs +++ b/src/librustc/traits/query/type_op/implied_outlives_bounds.rs @@ -38,19 +38,13 @@ impl<'gcx: 'tcx, 'tcx> super::QueryTypeOp<'gcx, 'tcx> for ImpliedOutlivesBounds< tcx: TyCtxt<'_, 'gcx, 'tcx>, canonicalized: Canonicalized<'gcx, ParamEnvAnd<'tcx, Self>>, ) -> Fallible> { - // FIXME the query should take a `ImpliedOutlivesBounds` - let Canonical { - variables, - value: - ParamEnvAnd { - param_env, - value: ImpliedOutlivesBounds { ty }, - }, - } = canonicalized; - let canonicalized = Canonical { - variables, - value: param_env.and(ty), - }; + // FIXME this `unchecked_map` is only necessary because the + // query is defined as taking a `ParamEnvAnd`; it should + // take a `ImpliedOutlivesBounds` instead + let canonicalized = canonicalized.unchecked_map(|ParamEnvAnd { param_env, value }| { + let ImpliedOutlivesBounds { ty } = value; + param_env.and(ty) + }); tcx.implied_outlives_bounds(canonicalized) } diff --git a/src/librustc/traits/query/type_op/mod.rs b/src/librustc/traits/query/type_op/mod.rs index d20d43cf75..f8f9650ebe 100644 --- a/src/librustc/traits/query/type_op/mod.rs +++ b/src/librustc/traits/query/type_op/mod.rs @@ -53,7 +53,7 @@ pub trait TypeOp<'gcx, 'tcx>: Sized + fmt::Debug { /// first canonicalize the key and then invoke the query on the tcx, /// which produces the resulting query region constraints. /// -/// [c]: https://rust-lang-nursery.github.io/rustc-guide/traits/canonicalization.html +/// [c]: https://rust-lang.github.io/rustc-guide/traits/canonicalization.html pub trait QueryTypeOp<'gcx: 'tcx, 'tcx>: fmt::Debug + Sized + TypeFoldable<'tcx> + Lift<'gcx> { diff --git a/src/librustc/traits/query/type_op/outlives.rs b/src/librustc/traits/query/type_op/outlives.rs index a36c5accd2..cd7c6d76ea 100644 --- a/src/librustc/traits/query/type_op/outlives.rs +++ b/src/librustc/traits/query/type_op/outlives.rs @@ -59,18 +59,10 @@ where // FIXME convert to the type expected by the `dropck_outlives` // query. This should eventually be fixed by changing the // *underlying query*. - let Canonical { - variables, - value: - ParamEnvAnd { - param_env, - value: DropckOutlives { dropped_ty }, - }, - } = canonicalized; - let canonicalized = Canonical { - variables, - value: param_env.and(dropped_ty), - }; + let canonicalized = canonicalized.unchecked_map(|ParamEnvAnd { param_env, value }| { + let DropckOutlives { dropped_ty } = value; + param_env.and(dropped_ty) + }); tcx.dropck_outlives(canonicalized) } diff --git a/src/librustc/traits/select.rs b/src/librustc/traits/select.rs index 2ea16823cc..fb4c9f3bad 100644 --- a/src/librustc/traits/select.rs +++ b/src/librustc/traits/select.rs @@ -10,7 +10,7 @@ //! See [rustc guide] for more info on how this works. //! -//! [rustc guide]: https://rust-lang-nursery.github.io/rustc-guide/traits/resolution.html#selection +//! 
[rustc guide]: https://rust-lang.github.io/rustc-guide/traits/resolution.html#selection use self::EvaluationResult::*; use self::SelectionCandidate::*; @@ -30,11 +30,11 @@ use super::{ObligationCause, PredicateObligation, TraitObligation}; use super::{OutputTypeParameterMismatch, Overflow, SelectionError, Unimplemented}; use super::{ VtableAutoImpl, VtableBuiltin, VtableClosure, VtableFnPointer, VtableGenerator, VtableImpl, - VtableObject, VtableParam, + VtableObject, VtableParam, VtableTraitAlias, }; use super::{ VtableAutoImplData, VtableBuiltinData, VtableClosureData, VtableFnPointerData, - VtableGeneratorData, VtableImplData, VtableObjectData, + VtableGeneratorData, VtableImplData, VtableObjectData, VtableTraitAliasData, }; use dep_graph::{DepKind, DepNodeIndex}; @@ -271,6 +271,8 @@ enum SelectionCandidate<'tcx> { /// types generated for a fn pointer type (e.g., `fn(int)->int`) FnPointerCandidate, + TraitAliasCandidate(DefId), + ObjectCandidate, BuiltinObjectCandidate, @@ -286,12 +288,13 @@ impl<'a, 'tcx> ty::Lift<'tcx> for SelectionCandidate<'a> { ImplCandidate(def_id) => ImplCandidate(def_id), AutoImplCandidate(def_id) => AutoImplCandidate(def_id), ProjectionCandidate => ProjectionCandidate, + ClosureCandidate => ClosureCandidate, + GeneratorCandidate => GeneratorCandidate, FnPointerCandidate => FnPointerCandidate, + TraitAliasCandidate(def_id) => TraitAliasCandidate(def_id), ObjectCandidate => ObjectCandidate, BuiltinObjectCandidate => BuiltinObjectCandidate, BuiltinUnsizeCandidate => BuiltinUnsizeCandidate, - ClosureCandidate => ClosureCandidate, - GeneratorCandidate => GeneratorCandidate, ParamCandidate(ref trait_ref) => { return tcx.lift(trait_ref).map(ParamCandidate); @@ -587,7 +590,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { obligation: &TraitObligation<'tcx>, ) -> SelectionResult<'tcx, Selection<'tcx>> { debug!("select({:?})", obligation); - debug_assert!(!obligation.predicate.has_escaping_regions()); + debug_assert!(!obligation.predicate.has_escaping_bound_vars()); let stack = self.push_stack(TraitObligationStackList::empty(), obligation); @@ -690,7 +693,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { match obligation.predicate { ty::Predicate::Trait(ref t) => { - debug_assert!(!t.has_escaping_regions()); + debug_assert!(!t.has_escaping_bound_vars()); let obligation = obligation.with(t.clone()); self.evaluate_trait_predicate_recursively(previous_stack, obligation) } @@ -722,9 +725,9 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { }, ty::Predicate::TypeOutlives(ref binder) => { - assert!(!binder.has_escaping_regions()); - // Check if the type has higher-ranked regions. - if binder.skip_binder().0.has_escaping_regions() { + assert!(!binder.has_escaping_bound_vars()); + // Check if the type has higher-ranked vars. + if binder.skip_binder().0.has_escaping_bound_vars() { // If so, this obligation is an error (for now). Eventually we should be // able to support additional cases here, like `for<'a> &'a str: 'a`. @@ -740,7 +743,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { Ok(EvaluatedToErr) } } else { - // If the type has no late bound regions, then if we assign all + // If the type has no late bound vars, then if we assign all // the inference variables in it to be 'static, then the type // will be 'static itself. // @@ -1170,7 +1173,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { // candidates. See [rustc guide] for more details. 
// // [rustc guide]: - // https://rust-lang-nursery.github.io/rustc-guide/traits/resolution.html#candidate-assembly + // https://rust-lang.github.io/rustc-guide/traits/resolution.html#candidate-assembly fn candidate_from_obligation<'o>( &mut self, @@ -1199,7 +1202,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { "candidate_from_obligation(cache_fresh_trait_pred={:?}, obligation={:?})", cache_fresh_trait_pred, stack ); - debug_assert!(!stack.obligation.predicate.has_escaping_regions()); + debug_assert!(!stack.obligation.predicate.has_escaping_bound_vars()); if let Some(c) = self.check_candidate_cache(stack.obligation.param_env, &cache_fresh_trait_pred) @@ -1368,7 +1371,8 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { // Winnow, but record the exact outcome of evaluation, which // is needed for specialization. Propagate overflow if it occurs. - let mut candidates = candidates.into_iter() + let mut candidates = candidates + .into_iter() .map(|c| match self.evaluate_candidate(stack, &c) { Ok(eval) if eval.may_apply() => Ok(Some(EvaluatedCandidate { candidate: c, @@ -1377,8 +1381,8 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { Ok(_) => Ok(None), Err(OverflowError) => Err(Overflow), }) - .flat_map(Result::transpose) - .collect::, _>>()?; + .flat_map(Result::transpose) + .collect::, _>>()?; debug!( "winnowed to {} candidates for {:?}: {:?}", @@ -1451,7 +1455,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { let predicate = self.infcx() .resolve_type_vars_if_possible(&obligation.predicate); - // ok to skip binder because of the nature of the + // OK to skip binder because of the nature of the // trait-ref-is-knowable check, which does not care about // bound regions let trait_ref = predicate.skip_binder().trait_ref; @@ -1631,6 +1635,8 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { ambiguous: false, }; + self.assemble_candidates_for_trait_alias(obligation, &mut candidates)?; + // Other bounds. Consider both in-scope bounds from fn decl // and applicable impls. There is a certain set of precedence rules here. 
let def_id = obligation.predicate.def_id(); @@ -1720,7 +1726,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { let poly_trait_predicate = self.infcx() .resolve_type_vars_if_possible(&obligation.predicate); let (skol_trait_predicate, placeholder_map) = self.infcx() - .replace_late_bound_regions_with_placeholders(&poly_trait_predicate); + .replace_bound_vars_with_placeholders(&poly_trait_predicate); debug!( "match_projection_obligation_against_definition_bounds: \ skol_trait_predicate={:?} placeholder_map={:?}", @@ -1800,7 +1806,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { placeholder_map: &infer::PlaceholderMap<'tcx>, snapshot: &infer::CombinedSnapshot<'cx, 'tcx>, ) -> bool { - debug_assert!(!skol_trait_ref.has_escaping_regions()); + debug_assert!(!skol_trait_ref.has_escaping_bound_vars()); if self.infcx .at(&obligation.cause, obligation.param_env) .sup(ty::Binder::dummy(skol_trait_ref), trait_bound) @@ -1878,7 +1884,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { return Ok(()); } - // ok to skip binder because the substs on generator types never + // OK to skip binder because the substs on generator types never // touch bound regions, they just capture the in-scope // type/region parameters let self_ty = *obligation.self_ty().skip_binder(); @@ -1922,7 +1928,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { } }; - // ok to skip binder because the substs on closure types never + // OK to skip binder because the substs on closure types never // touch bound regions, they just capture the in-scope // type/region parameters match obligation.self_ty().skip_binder().sty { @@ -1972,7 +1978,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { return Ok(()); } - // ok to skip binder because what we are inspecting doesn't involve bound regions + // OK to skip binder because what we are inspecting doesn't involve bound regions let self_ty = *obligation.self_ty().skip_binder(); match self_ty.sty { ty::Infer(ty::TyVar(_)) => { @@ -2090,18 +2096,6 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { obligation.self_ty().skip_binder() ); - // Object-safety candidates are only applicable to object-safe - // traits. Including this check is useful because it helps - // inference in cases of traits like `BorrowFrom`, which are - // not object-safe, and which rely on being able to infer the - // self-type from one of the other inputs. Without this check, - // these cases wind up being considered ambiguous due to a - // (spurious) ambiguity introduced here. - let predicate_trait_ref = obligation.predicate.to_poly_trait_ref(); - if !self.tcx().is_object_safe(predicate_trait_ref.def_id()) { - return; - } - self.probe(|this, _snapshot| { // the code below doesn't care about regions, and the // self-ty here doesn't escape this probe, so just erase @@ -2179,7 +2173,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { // T: Trait // so it seems ok if we (conservatively) fail to accept that `Unsize` // obligation above. Should be possible to extend this in the future. - let source = match obligation.self_ty().no_late_bound_regions() { + let source = match obligation.self_ty().no_bound_vars() { Some(t) => t, None => { // Don't add any candidates if there are bound regions. 
@@ -2249,6 +2243,24 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { } } + fn assemble_candidates_for_trait_alias( + &mut self, + obligation: &TraitObligation<'tcx>, + candidates: &mut SelectionCandidateSet<'tcx>, + ) -> Result<(), SelectionError<'tcx>> { + // OK to skip binder here because the tests we do below do not involve bound regions + let self_ty = *obligation.self_ty().skip_binder(); + debug!("assemble_candidates_for_trait_alias(self_ty={:?})", self_ty); + + let def_id = obligation.predicate.def_id(); + + if ty::is_trait_alias(self.tcx(), def_id) { + candidates.vec.push(TraitAliasCandidate(def_id.clone())); + } + + Ok(()) + } + /////////////////////////////////////////////////////////////////////////// // WINNOW // @@ -2299,7 +2311,8 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { | FnPointerCandidate | BuiltinObjectCandidate | BuiltinUnsizeCandidate - | BuiltinCandidate { .. } => { + | BuiltinCandidate { .. } + | TraitAliasCandidate(..) => { // Global bounds from the where clause should be ignored // here (see issue #50825). Otherwise, we have a where // clause so don't go around looking for impls. @@ -2329,7 +2342,8 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { | FnPointerCandidate | BuiltinObjectCandidate | BuiltinUnsizeCandidate - | BuiltinCandidate { .. } => true, + | BuiltinCandidate { .. } + | TraitAliasCandidate(..) => true, ObjectCandidate | ProjectionCandidate => { // Arbitrarily give param candidates priority // over projection and object candidates. @@ -2456,7 +2470,8 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { ty::Infer(ty::TyVar(_)) => Ambiguous, ty::UnnormalizedProjection(..) - | ty::Infer(ty::BoundTy(_)) + | ty::Placeholder(..) + | ty::Bound(..) | ty::Infer(ty::FreshTy(_)) | ty::Infer(ty::FreshIntTy(_)) | ty::Infer(ty::FreshFloatTy(_)) => { @@ -2541,7 +2556,8 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { } ty::UnnormalizedProjection(..) - | ty::Infer(ty::BoundTy(_)) + | ty::Placeholder(..) + | ty::Bound(..) | ty::Infer(ty::FreshTy(_)) | ty::Infer(ty::FreshIntTy(_)) | ty::Infer(ty::FreshFloatTy(_)) => { @@ -2580,11 +2596,12 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { | ty::Char => Vec::new(), ty::UnnormalizedProjection(..) + | ty::Placeholder(..) | ty::Dynamic(..) | ty::Param(..) | ty::Foreign(..) | ty::Projection(..) - | ty::Infer(ty::BoundTy(_)) + | ty::Bound(..) | ty::Infer(ty::TyVar(_)) | ty::Infer(ty::FreshTy(_)) | ty::Infer(ty::FreshIntTy(_)) @@ -2668,7 +2685,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { self.in_snapshot(|this, snapshot| { let (skol_ty, placeholder_map) = this.infcx() - .replace_late_bound_regions_with_placeholders(&ty); + .replace_bound_vars_with_placeholders(&ty); let Normalized { value: normalized_ty, mut obligations, @@ -2703,7 +2720,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { // type error. See [rustc guide] for more details. 
// // [rustc guide]: - // https://rust-lang-nursery.github.io/rustc-guide/traits/resolution.html#confirmation + // https://rust-lang.github.io/rustc-guide/traits/resolution.html#confirmation fn confirm_candidate( &mut self, @@ -2723,15 +2740,20 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { Ok(VtableParam(obligations)) } + ImplCandidate(impl_def_id) => Ok(VtableImpl(self.confirm_impl_candidate( + obligation, + impl_def_id, + ))), + AutoImplCandidate(trait_def_id) => { let data = self.confirm_auto_impl_candidate(obligation, trait_def_id); Ok(VtableAutoImpl(data)) } - ImplCandidate(impl_def_id) => Ok(VtableImpl(self.confirm_impl_candidate( - obligation, - impl_def_id, - ))), + ProjectionCandidate => { + self.confirm_projection_candidate(obligation); + Ok(VtableParam(Vec::new())) + } ClosureCandidate => { let vtable_closure = self.confirm_closure_candidate(obligation)?; @@ -2743,13 +2765,14 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { Ok(VtableGenerator(vtable_generator)) } - BuiltinObjectCandidate => { - // This indicates something like `(Trait+Send) : - // Send`. In this case, we know that this holds - // because that's what the object type is telling us, - // and there's really no additional obligations to - // prove and no types in particular to unify etc. - Ok(VtableParam(Vec::new())) + FnPointerCandidate => { + let data = self.confirm_fn_pointer_candidate(obligation)?; + Ok(VtableFnPointer(data)) + } + + TraitAliasCandidate(alias_def_id) => { + let data = self.confirm_trait_alias_candidate(obligation, alias_def_id); + Ok(VtableTraitAlias(data)) } ObjectCandidate => { @@ -2757,13 +2780,12 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { Ok(VtableObject(data)) } - FnPointerCandidate => { - let data = self.confirm_fn_pointer_candidate(obligation)?; - Ok(VtableFnPointer(data)) - } - - ProjectionCandidate => { - self.confirm_projection_candidate(obligation); + BuiltinObjectCandidate => { + // This indicates something like `(Trait+Send) : + // Send`. In this case, we know that this holds + // because that's what the object type is telling us, + // and there's really no additional obligations to + // prove and no types in particular to unify etc. Ok(VtableParam(Vec::new())) } @@ -2876,7 +2898,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { self.vtable_auto_impl(obligation, trait_def_id, types) } - /// See `confirm_auto_impl_candidate` + /// See `confirm_auto_impl_candidate`. fn vtable_auto_impl( &mut self, obligation: &TraitObligation<'tcx>, @@ -2897,7 +2919,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { let trait_obligations: Vec> = self.in_snapshot(|this, snapshot| { let poly_trait_ref = obligation.predicate.to_poly_trait_ref(); let (trait_ref, placeholder_map) = this.infcx() - .replace_late_bound_regions_with_placeholders(&poly_trait_ref); + .replace_bound_vars_with_placeholders(&poly_trait_ref); let cause = obligation.derived_cause(ImplDerivedObligation); this.impl_or_trait_obligations( cause, @@ -2933,7 +2955,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { // this time not in a probe. 
self.in_snapshot(|this, snapshot| { let (substs, placeholder_map) = this.rematch_impl(impl_def_id, obligation, snapshot); - debug!("confirm_impl_candidate substs={:?}", substs); + debug!("confirm_impl_candidate: substs={:?}", substs); let cause = obligation.derived_cause(ImplDerivedObligation); this.vtable_impl( impl_def_id, @@ -2997,16 +3019,14 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { ) -> VtableObjectData<'tcx, PredicateObligation<'tcx>> { debug!("confirm_object_candidate({:?})", obligation); - // FIXME skipping binder here seems wrong -- we should - // probably flatten the binder from the obligation and the - // binder from the object. Have to try to make a broken test - // case that results. -nmatsakis + // FIXME(nmatsakis) skipping binder here seems wrong -- we should + // probably flatten the binder from the obligation and the binder + // from the object. Have to try to make a broken test case that + // results. let self_ty = self.infcx .shallow_resolve(*obligation.self_ty().skip_binder()); let poly_trait_ref = match self_ty.sty { - ty::Dynamic(ref data, ..) => { - data.principal().with_self_ty(self.tcx(), self_ty) - } + ty::Dynamic(ref data, ..) => data.principal().with_self_ty(self.tcx(), self_ty), _ => span_bug!(obligation.cause.span, "object candidate with non-object"), }; @@ -3054,7 +3074,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { ) -> Result>, SelectionError<'tcx>> { debug!("confirm_fn_pointer_candidate({:?})", obligation); - // ok to skip binder; it is reintroduced below + // OK to skip binder; it is reintroduced below let self_ty = self.infcx .shallow_resolve(*obligation.self_ty().skip_binder()); let sig = self_ty.fn_sig(self.tcx()); @@ -3090,11 +3110,51 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { }) } + fn confirm_trait_alias_candidate( + &mut self, + obligation: &TraitObligation<'tcx>, + alias_def_id: DefId, + ) -> VtableTraitAliasData<'tcx, PredicateObligation<'tcx>> { + debug!( + "confirm_trait_alias_candidate({:?}, {:?})", + obligation, alias_def_id + ); + + self.in_snapshot(|this, snapshot| { + let (predicate, placeholder_map) = this.infcx() + .replace_bound_vars_with_placeholders(&obligation.predicate); + let trait_ref = predicate.trait_ref; + let trait_def_id = trait_ref.def_id; + let substs = trait_ref.substs; + + let trait_obligations = this.impl_or_trait_obligations( + obligation.cause.clone(), + obligation.recursion_depth, + obligation.param_env, + trait_def_id, + &substs, + placeholder_map, + snapshot, + ); + + debug!( + "confirm_trait_alias_candidate: trait_def_id={:?} trait_obligations={:?}", + trait_def_id, trait_obligations + ); + + VtableTraitAliasData { + alias_def_id, + substs: substs, + nested: trait_obligations, + } + }) + } + fn confirm_generator_candidate( &mut self, obligation: &TraitObligation<'tcx>, ) -> Result>, SelectionError<'tcx>> { - // ok to skip binder because the substs on generator types never + // OK to skip binder because the substs on generator types never // touch bound regions, they just capture the in-scope // type/region parameters let self_ty = self.infcx @@ -3152,7 +3212,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { .fn_trait_kind(obligation.predicate.def_id()) .unwrap_or_else(|| bug!("closure candidate for non-fn trait {:?}", obligation)); - // ok to skip binder because the substs on closure types never + // OK to skip binder because the substs on closure types never // touch bound regions, they just capture the in-scope // type/region parameters let self_ty 
= self.infcx @@ -3248,7 +3308,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { // assemble_candidates_for_unsizing should ensure there are no late bound // regions here. See the comment there for more details. let source = self.infcx - .shallow_resolve(obligation.self_ty().no_late_bound_regions().unwrap()); + .shallow_resolve(obligation.self_ty().no_bound_vars().unwrap()); let target = obligation .predicate .skip_binder() @@ -3469,7 +3529,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { _ => bug!(), }; - Ok(VtableBuiltinData { nested: nested }) + Ok(VtableBuiltinData { nested }) } /////////////////////////////////////////////////////////////////////////// @@ -3525,7 +3585,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { } let (skol_obligation, placeholder_map) = self.infcx() - .replace_late_bound_regions_with_placeholders(&obligation.predicate); + .replace_bound_vars_with_placeholders(&obligation.predicate); let skol_obligation_trait_ref = skol_obligation.trait_ref; let impl_substs = self.infcx @@ -3666,8 +3726,17 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { closure_def_id: DefId, substs: ty::ClosureSubsts<'tcx>, ) -> ty::PolyTraitRef<'tcx> { + debug!( + "closure_trait_ref_unnormalized(obligation={:?}, closure_def_id={:?}, substs={:?})", + obligation, closure_def_id, substs, + ); let closure_type = self.infcx.closure_sig(closure_def_id, substs); + debug!( + "closure_trait_ref_unnormalized: closure_type = {:?}", + closure_type + ); + // (1) Feels icky to skip the binder here, but OTOH we know // that the self-type is an unboxed closure type and hence is // in fact unparameterized (or at least does not reference any diff --git a/src/librustc/traits/specialize/mod.rs b/src/librustc/traits/specialize/mod.rs index 0ce1d8f822..19ef3171b1 100644 --- a/src/librustc/traits/specialize/mod.rs +++ b/src/librustc/traits/specialize/mod.rs @@ -17,7 +17,7 @@ //! See the [rustc guide] for a bit more detail on how specialization //! fits together with the rest of the trait machinery. //! -//! [rustc guide]: https://rust-lang-nursery.github.io/rustc-guide/traits/specialization.html +//! [rustc guide]: https://rust-lang.github.io/rustc-guide/traits/specialization.html use super::{SelectionContext, FulfillmentContext}; use super::util::impl_trait_ref_and_oblig; @@ -396,7 +396,10 @@ fn to_pretty_impl_header(tcx: TyCtxt<'_, '_, '_>, impl_def_id: DefId) -> Option< if !substs.is_noop() { types_without_default_bounds.extend(substs.types()); w.push('<'); - w.push_str(&substs.iter().map(|k| k.to_string()).collect::>().join(", ")); + w.push_str(&substs.iter() + .map(|k| k.to_string()) + .filter(|k| &k[..] != "'_") + .collect::>().join(", ")); w.push('>'); } @@ -404,7 +407,7 @@ fn to_pretty_impl_header(tcx: TyCtxt<'_, '_, '_>, impl_def_id: DefId) -> Option< // The predicates will contain default bounds like `T: Sized`. We need to // remove these bounds, and add `T: ?Sized` to any untouched type parameters. 
- let predicates = tcx.predicates_of(impl_def_id).predicates; + let predicates = &tcx.predicates_of(impl_def_id).predicates; let mut pretty_predicates = Vec::with_capacity( predicates.len() + types_without_default_bounds.len()); diff --git a/src/librustc/traits/specialize/specialization_graph.rs b/src/librustc/traits/specialize/specialization_graph.rs index e237cab5ea..db0302f3a9 100644 --- a/src/librustc/traits/specialize/specialization_graph.rs +++ b/src/librustc/traits/specialize/specialization_graph.rs @@ -73,8 +73,8 @@ enum Inserted { /// The impl was inserted as a new child in this group of children. BecameNewSibling(Option), - /// The impl should replace an existing impl X, because the impl specializes X. - ReplaceChild(DefId), + /// The impl should replace existing impls [X1, ..], because the impl specializes X1, X2, etc. + ReplaceChildren(Vec), /// The impl is a specialization of an existing child. ShouldRecurseOn(DefId), @@ -124,6 +124,7 @@ impl<'a, 'gcx, 'tcx> Children { -> Result { let mut last_lint = None; + let mut replace_children = Vec::new(); debug!( "insert(impl_def_id={:?}, simplified_self={:?})", @@ -194,7 +195,7 @@ impl<'a, 'gcx, 'tcx> Children { debug!("placing as parent of TraitRef {:?}", tcx.impl_trait_ref(possible_sibling).unwrap()); - return Ok(Inserted::ReplaceChild(possible_sibling)); + replace_children.push(possible_sibling); } else { if !tcx.impls_are_allowed_to_overlap(impl_def_id, possible_sibling) { traits::overlapping_impls( @@ -211,6 +212,10 @@ impl<'a, 'gcx, 'tcx> Children { } } + if !replace_children.is_empty() { + return Ok(Inserted::ReplaceChildren(replace_children)); + } + // no overlap with any potential siblings, so add as a new sibling debug!("placing as new sibling"); self.insert_blindly(tcx, impl_def_id); @@ -282,7 +287,7 @@ impl<'a, 'gcx, 'tcx> Graph { last_lint = opt_lint; break; } - ReplaceChild(grand_child_to_be) => { + ReplaceChildren(grand_children_to_be) => { // We currently have // // P @@ -302,17 +307,23 @@ impl<'a, 'gcx, 'tcx> Graph { let siblings = self.children .get_mut(&parent) .unwrap(); - siblings.remove_existing(tcx, grand_child_to_be); + for &grand_child_to_be in &grand_children_to_be { + siblings.remove_existing(tcx, grand_child_to_be); + } siblings.insert_blindly(tcx, impl_def_id); } // Set G's parent to N and N's parent to P - self.parent.insert(grand_child_to_be, impl_def_id); + for &grand_child_to_be in &grand_children_to_be { + self.parent.insert(grand_child_to_be, impl_def_id); + } self.parent.insert(impl_def_id, parent); // Add G as N's child. 
- self.children.entry(impl_def_id).or_default() - .insert_blindly(tcx, grand_child_to_be); + for &grand_child_to_be in &grand_children_to_be { + self.children.entry(impl_def_id).or_default() + .insert_blindly(tcx, grand_child_to_be); + } break; } ShouldRecurseOn(new_parent) => { @@ -366,7 +377,7 @@ impl<'a, 'gcx, 'tcx> Node { pub fn items( &self, tcx: TyCtxt<'a, 'gcx, 'tcx>, - ) -> impl Iterator + 'a { + ) -> ty::AssociatedItemsIterator<'a, 'gcx, 'tcx> { tcx.associated_items(self.def_id()) } diff --git a/src/librustc/traits/structural_impls.rs b/src/librustc/traits/structural_impls.rs index c50c9703eb..36538ac088 100644 --- a/src/librustc/traits/structural_impls.rs +++ b/src/librustc/traits/structural_impls.rs @@ -14,9 +14,11 @@ use traits; use traits::project::Normalized; use ty::fold::{TypeFoldable, TypeFolder, TypeVisitor}; use ty::{self, Lift, TyCtxt}; +use syntax::symbol::InternedString; use std::fmt; use std::rc::Rc; +use std::collections::{BTreeSet, BTreeMap}; // structural impls for the structs in traits @@ -62,6 +64,8 @@ impl<'tcx, N: fmt::Debug> fmt::Debug for traits::Vtable<'tcx, N> { super::VtableParam(ref n) => write!(f, "VtableParam({:?})", n), super::VtableBuiltin(ref d) => write!(f, "{:?}", d), + + super::VtableTraitAlias(ref d) => write!(f, "{:?}", d), } } } @@ -70,7 +74,7 @@ impl<'tcx, N: fmt::Debug> fmt::Debug for traits::VtableImplData<'tcx, N> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, - "VtableImpl(impl_def_id={:?}, substs={:?}, nested={:?})", + "VtableImplData(impl_def_id={:?}, substs={:?}, nested={:?})", self.impl_def_id, self.substs, self.nested ) } @@ -80,7 +84,7 @@ impl<'tcx, N: fmt::Debug> fmt::Debug for traits::VtableGeneratorData<'tcx, N> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, - "VtableGenerator(generator_def_id={:?}, substs={:?}, nested={:?})", + "VtableGeneratorData(generator_def_id={:?}, substs={:?}, nested={:?})", self.generator_def_id, self.substs, self.nested ) } @@ -90,7 +94,7 @@ impl<'tcx, N: fmt::Debug> fmt::Debug for traits::VtableClosureData<'tcx, N> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, - "VtableClosure(closure_def_id={:?}, substs={:?}, nested={:?})", + "VtableClosureData(closure_def_id={:?}, substs={:?}, nested={:?})", self.closure_def_id, self.substs, self.nested ) } @@ -98,7 +102,7 @@ impl<'tcx, N: fmt::Debug> fmt::Debug for traits::VtableClosureData<'tcx, N> { impl<'tcx, N: fmt::Debug> fmt::Debug for traits::VtableBuiltinData { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "VtableBuiltin(nested={:?})", self.nested) + write!(f, "VtableBuiltinData(nested={:?})", self.nested) } } @@ -116,7 +120,7 @@ impl<'tcx, N: fmt::Debug> fmt::Debug for traits::VtableObjectData<'tcx, N> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, - "VtableObject(upcast={:?}, vtable_base={}, nested={:?})", + "VtableObjectData(upcast={:?}, vtable_base={}, nested={:?})", self.upcast_trait_ref, self.vtable_base, self.nested ) } @@ -126,12 +130,22 @@ impl<'tcx, N: fmt::Debug> fmt::Debug for traits::VtableFnPointerData<'tcx, N> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, - "VtableFnPointer(fn_ty={:?}, nested={:?})", + "VtableFnPointerData(fn_ty={:?}, nested={:?})", self.fn_ty, self.nested ) } } +impl<'tcx, N: fmt::Debug> fmt::Debug for traits::VtableTraitAliasData<'tcx, N> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "VtableTraitAlias(alias_def_id={:?}, substs={:?}, 
nested={:?})", + self.alias_def_id, self.substs, self.nested + ) + } +} + impl<'tcx> fmt::Debug for traits::FulfillmentError<'tcx> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "FulfillmentError({:?},{:?})", self.obligation, self.code) @@ -157,6 +171,290 @@ impl<'tcx> fmt::Debug for traits::MismatchedProjectionTypes<'tcx> { } } +impl<'tcx> fmt::Display for traits::WhereClause<'tcx> { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + use traits::WhereClause::*; + + // Bypass ppaux because it does not print out anonymous regions. + fn write_region_name<'tcx>( + r: ty::Region<'tcx>, + fmt: &mut fmt::Formatter<'_> + ) -> fmt::Result { + match r { + ty::ReLateBound(index, br) => match br { + ty::BoundRegion::BrNamed(_, name) => write!(fmt, "{}", name), + ty::BoundRegion::BrAnon(var) => { + if *index == ty::INNERMOST { + write!(fmt, "'^{}", var) + } else { + write!(fmt, "'^{}_{}", index.index(), var) + } + } + _ => write!(fmt, "'_"), + } + + _ => write!(fmt, "{}", r), + } + } + + match self { + Implemented(trait_ref) => write!(fmt, "Implemented({})", trait_ref), + ProjectionEq(projection) => write!(fmt, "ProjectionEq({})", projection), + RegionOutlives(predicate) => { + write!(fmt, "RegionOutlives({}: ", predicate.0)?; + write_region_name(predicate.1, fmt)?; + write!(fmt, ")") + } + TypeOutlives(predicate) => { + write!(fmt, "TypeOutlives({}: ", predicate.0)?; + write_region_name(predicate.1, fmt)?; + write!(fmt, ")") + } + } + } +} + +impl<'tcx> fmt::Display for traits::WellFormed<'tcx> { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + use traits::WellFormed::*; + + match self { + Trait(trait_ref) => write!(fmt, "WellFormed({})", trait_ref), + Ty(ty) => write!(fmt, "WellFormed({})", ty), + } + } +} + +impl<'tcx> fmt::Display for traits::FromEnv<'tcx> { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + use traits::FromEnv::*; + + match self { + Trait(trait_ref) => write!(fmt, "FromEnv({})", trait_ref), + Ty(ty) => write!(fmt, "FromEnv({})", ty), + } + } +} + +impl<'tcx> fmt::Display for traits::DomainGoal<'tcx> { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + use traits::DomainGoal::*; + + match self { + Holds(wc) => write!(fmt, "{}", wc), + WellFormed(wf) => write!(fmt, "{}", wf), + FromEnv(from_env) => write!(fmt, "{}", from_env), + Normalize(projection) => write!( + fmt, + "Normalize({} -> {})", + projection.projection_ty, + projection.ty + ), + } + } +} + +impl fmt::Display for traits::QuantifierKind { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + use traits::QuantifierKind::*; + + match self { + Universal => write!(fmt, "forall"), + Existential => write!(fmt, "exists"), + } + } +} + +/// Collect names for regions / types bound by a quantified goal / clause. +/// This collector does not try to do anything clever like in ppaux, it's just used +/// for debug output in tests anyway. +struct BoundNamesCollector { + // Just sort by name because `BoundRegion::BrNamed` does not have a `BoundVar` index anyway. + regions: BTreeSet, + + // Sort by `BoundVar` index, so usually this should be equivalent to the order given + // by the list of type parameters. 
+ types: BTreeMap, + + binder_index: ty::DebruijnIndex, +} + +impl BoundNamesCollector { + fn new() -> Self { + BoundNamesCollector { + regions: BTreeSet::new(), + types: BTreeMap::new(), + binder_index: ty::INNERMOST, + } + } + + fn is_empty(&self) -> bool { + self.regions.is_empty() && self.types.is_empty() + } + + fn write_names(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + let mut start = true; + for r in &self.regions { + if !start { + write!(fmt, ", ")?; + } + start = false; + write!(fmt, "{}", r)?; + } + for (_, t) in &self.types { + if !start { + write!(fmt, ", ")?; + } + start = false; + write!(fmt, "{}", t)?; + } + Ok(()) + } +} + +impl<'tcx> TypeVisitor<'tcx> for BoundNamesCollector { + fn visit_binder>(&mut self, t: &ty::Binder) -> bool { + self.binder_index.shift_in(1); + let result = t.super_visit_with(self); + self.binder_index.shift_out(1); + result + } + + fn visit_ty(&mut self, t: ty::Ty<'tcx>) -> bool { + use syntax::symbol::Symbol; + + match t.sty { + ty::Bound(debruijn, bound_ty) if debruijn == self.binder_index => { + self.types.insert( + bound_ty.var.as_u32(), + match bound_ty.kind { + ty::BoundTyKind::Param(name) => name, + ty::BoundTyKind::Anon => Symbol::intern( + &format!("^{}", bound_ty.var.as_u32()) + ).as_interned_str(), + } + ); + } + + _ => (), + }; + + t.super_visit_with(self) + } + + fn visit_region(&mut self, r: ty::Region<'tcx>) -> bool { + use syntax::symbol::Symbol; + + match r { + ty::ReLateBound(index, br) if *index == self.binder_index => { + match br { + ty::BoundRegion::BrNamed(_, name) => { + self.regions.insert(*name); + } + + ty::BoundRegion::BrAnon(var) => { + self.regions.insert(Symbol::intern( + &format!("'^{}", var) + ).as_interned_str()); + } + + _ => (), + } + } + + _ => (), + }; + + r.super_visit_with(self) + } +} + +impl<'tcx> fmt::Display for traits::Goal<'tcx> { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + use traits::GoalKind::*; + + match self { + Implies(hypotheses, goal) => { + write!(fmt, "if (")?; + for (index, hyp) in hypotheses.iter().enumerate() { + if index > 0 { + write!(fmt, ", ")?; + } + write!(fmt, "{}", hyp)?; + } + write!(fmt, ") {{ {} }}", goal) + } + And(goal1, goal2) => write!(fmt, "({} && {})", goal1, goal2), + Not(goal) => write!(fmt, "not {{ {} }}", goal), + DomainGoal(goal) => write!(fmt, "{}", goal), + Quantified(qkind, goal) => { + let mut collector = BoundNamesCollector::new(); + goal.skip_binder().visit_with(&mut collector); + + if !collector.is_empty() { + write!(fmt, "{}<", qkind)?; + collector.write_names(fmt)?; + write!(fmt, "> {{ ")?; + } + + write!(fmt, "{}", goal.skip_binder())?; + + if !collector.is_empty() { + write!(fmt, " }}")?; + } + + Ok(()) + } + CannotProve => write!(fmt, "CannotProve"), + } + } +} + +impl<'tcx> fmt::Display for traits::ProgramClause<'tcx> { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + let traits::ProgramClause { goal, hypotheses, .. 
} = self; + write!(fmt, "{}", goal)?; + if !hypotheses.is_empty() { + write!(fmt, " :- ")?; + for (index, condition) in hypotheses.iter().enumerate() { + if index > 0 { + write!(fmt, ", ")?; + } + write!(fmt, "{}", condition)?; + } + } + write!(fmt, ".") + } +} + +impl<'tcx> fmt::Display for traits::Clause<'tcx> { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + use traits::Clause::*; + + match self { + Implies(clause) => write!(fmt, "{}", clause), + ForAll(clause) => { + let mut collector = BoundNamesCollector::new(); + clause.skip_binder().visit_with(&mut collector); + + if !collector.is_empty() { + write!(fmt, "forall<")?; + collector.write_names(fmt)?; + write!(fmt, "> {{ ")?; + } + + write!(fmt, "{}", clause.skip_binder())?; + + if !collector.is_empty() { + write!(fmt, " }}")?; + } + + Ok(()) + } + } + } +} + /////////////////////////////////////////////////////////////////////////// // Lift implementations @@ -172,9 +470,7 @@ impl<'a, 'tcx> Lift<'tcx> for traits::SelectionError<'a> { ) } super::TraitNotObjectSafe(def_id) => Some(super::TraitNotObjectSafe(def_id)), - super::ConstEvalFailure(ref err) => tcx.lift(&**err).map(|err| super::ConstEvalFailure( - err.into(), - )), + super::ConstEvalFailure(err) => Some(super::ConstEvalFailure(err)), super::Overflow => Some(super::Overflow), } } @@ -323,10 +619,106 @@ impl<'a, 'tcx> Lift<'tcx> for traits::Vtable<'a, ()> { nested, }) ), + traits::VtableTraitAlias(traits::VtableTraitAliasData { + alias_def_id, + substs, + nested, + }) => tcx.lift(&substs).map(|substs| + traits::VtableTraitAlias(traits::VtableTraitAliasData { + alias_def_id, + substs, + nested, + }) + ), } } } +EnumLiftImpl! { + impl<'a, 'tcx> Lift<'tcx> for traits::WhereClause<'a> { + type Lifted = traits::WhereClause<'tcx>; + (traits::WhereClause::Implemented)(trait_ref), + (traits::WhereClause::ProjectionEq)(projection), + (traits::WhereClause::TypeOutlives)(ty_outlives), + (traits::WhereClause::RegionOutlives)(region_outlives), + } +} + +EnumLiftImpl! { + impl<'a, 'tcx> Lift<'tcx> for traits::WellFormed<'a> { + type Lifted = traits::WellFormed<'tcx>; + (traits::WellFormed::Trait)(trait_ref), + (traits::WellFormed::Ty)(ty), + } +} + +EnumLiftImpl! { + impl<'a, 'tcx> Lift<'tcx> for traits::FromEnv<'a> { + type Lifted = traits::FromEnv<'tcx>; + (traits::FromEnv::Trait)(trait_ref), + (traits::FromEnv::Ty)(ty), + } +} + +EnumLiftImpl! { + impl<'a, 'tcx> Lift<'tcx> for traits::DomainGoal<'a> { + type Lifted = traits::DomainGoal<'tcx>; + (traits::DomainGoal::Holds)(wc), + (traits::DomainGoal::WellFormed)(wf), + (traits::DomainGoal::FromEnv)(from_env), + (traits::DomainGoal::Normalize)(projection), + } +} + +EnumLiftImpl! 
{ + impl<'a, 'tcx> Lift<'tcx> for traits::GoalKind<'a> { + type Lifted = traits::GoalKind<'tcx>; + (traits::GoalKind::Implies)(hypotheses, goal), + (traits::GoalKind::And)(goal1, goal2), + (traits::GoalKind::Not)(goal), + (traits::GoalKind::DomainGoal)(domain_goal), + (traits::GoalKind::Quantified)(kind, goal), + (traits::GoalKind::CannotProve), + } +} + +impl<'a, 'tcx> Lift<'tcx> for traits::Environment<'a> { + type Lifted = traits::Environment<'tcx>; + fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option { + tcx.lift(&self.clauses).map(|clauses| { + traits::Environment { + clauses, + } + }) + } +} + +impl<'a, 'tcx, G: Lift<'tcx>> Lift<'tcx> for traits::InEnvironment<'a, G> { + type Lifted = traits::InEnvironment<'tcx, G::Lifted>; + fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option { + tcx.lift(&self.environment).and_then(|environment| { + tcx.lift(&self.goal).map(|goal| { + traits::InEnvironment { + environment, + goal, + } + }) + }) + } +} + +impl<'tcx, C> Lift<'tcx> for chalk_engine::ExClause +where + C: chalk_engine::context::Context + Clone, + C: traits::ExClauseLift<'tcx>, +{ + type Lifted = C::LiftedExClause; + + fn lift_to_tcx<'a, 'gcx>(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Option { + ::lift_ex_clause_to_tcx(self, tcx) + } +} + /////////////////////////////////////////////////////////////////////////// // TypeFoldable implementations. @@ -388,6 +780,12 @@ BraceStructTypeFoldableImpl! { } where N: TypeFoldable<'tcx> } +BraceStructTypeFoldableImpl! { + impl<'tcx, N> TypeFoldable<'tcx> for traits::VtableTraitAliasData<'tcx, N> { + alias_def_id, substs, nested + } where N: TypeFoldable<'tcx> +} + EnumTypeFoldableImpl! { impl<'tcx, N> TypeFoldable<'tcx> for traits::Vtable<'tcx, N> { (traits::VtableImpl)(a), @@ -398,6 +796,7 @@ EnumTypeFoldableImpl! { (traits::VtableParam)(a), (traits::VtableBuiltin)(a), (traits::VtableObject)(a), + (traits::VtableTraitAlias)(a), } where N: TypeFoldable<'tcx> } @@ -408,123 +807,6 @@ BraceStructTypeFoldableImpl! 
{ } where T: TypeFoldable<'tcx> } -impl<'tcx> fmt::Display for traits::WhereClause<'tcx> { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - use traits::WhereClause::*; - - match self { - Implemented(trait_ref) => write!(fmt, "Implemented({})", trait_ref), - ProjectionEq(projection) => write!(fmt, "ProjectionEq({})", projection), - RegionOutlives(predicate) => write!(fmt, "RegionOutlives({})", predicate), - TypeOutlives(predicate) => write!(fmt, "TypeOutlives({})", predicate), - } - } -} - -impl<'tcx> fmt::Display for traits::WellFormed<'tcx> { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - use traits::WellFormed::*; - - match self { - Trait(trait_ref) => write!(fmt, "WellFormed({})", trait_ref), - Ty(ty) => write!(fmt, "WellFormed({})", ty), - } - } -} - -impl<'tcx> fmt::Display for traits::FromEnv<'tcx> { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - use traits::FromEnv::*; - - match self { - Trait(trait_ref) => write!(fmt, "FromEnv({})", trait_ref), - Ty(ty) => write!(fmt, "FromEnv({})", ty), - } - } -} - -impl<'tcx> fmt::Display for traits::DomainGoal<'tcx> { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - use traits::DomainGoal::*; - - match self { - Holds(wc) => write!(fmt, "{}", wc), - WellFormed(wf) => write!(fmt, "{}", wf), - FromEnv(from_env) => write!(fmt, "{}", from_env), - Normalize(projection) => write!(fmt, "Normalize({})", projection), - } - } -} - -impl fmt::Display for traits::QuantifierKind { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - use traits::QuantifierKind::*; - - match self { - Universal => write!(fmt, "forall"), - Existential => write!(fmt, "exists"), - } - } -} - -impl<'tcx> fmt::Display for traits::Goal<'tcx> { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - use traits::GoalKind::*; - - match self { - Implies(hypotheses, goal) => { - write!(fmt, "if (")?; - for (index, hyp) in hypotheses.iter().enumerate() { - if index > 0 { - write!(fmt, ", ")?; - } - write!(fmt, "{}", hyp)?; - } - write!(fmt, ") {{ {} }}", goal) - } - And(goal1, goal2) => write!(fmt, "({} && {})", goal1, goal2), - Not(goal) => write!(fmt, "not {{ {} }}", goal), - DomainGoal(goal) => write!(fmt, "{}", goal), - Quantified(qkind, goal) => { - // FIXME: appropriate binder names - write!(fmt, "{}<> {{ {} }}", qkind, goal.skip_binder()) - } - CannotProve => write!(fmt, "CannotProve"), - } - } -} - -impl<'tcx> fmt::Display for traits::ProgramClause<'tcx> { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - let traits::ProgramClause { goal, hypotheses, .. } = self; - write!(fmt, "{}", goal)?; - if !hypotheses.is_empty() { - write!(fmt, " :- ")?; - for (index, condition) in hypotheses.iter().enumerate() { - if index > 0 { - write!(fmt, ", ")?; - } - write!(fmt, "{}", condition)?; - } - } - write!(fmt, ".") - } -} - -impl<'tcx> fmt::Display for traits::Clause<'tcx> { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - use traits::Clause::*; - - match self { - Implies(clause) => write!(fmt, "{}", clause), - ForAll(clause) => { - // FIXME: appropriate binder names - write!(fmt, "forall<> {{ {} }}", clause.skip_binder()) - } - } - } -} - EnumTypeFoldableImpl! { impl<'tcx> TypeFoldable<'tcx> for traits::WhereClause<'tcx> { (traits::WhereClause::Implemented)(trait_ref), @@ -534,16 +816,6 @@ EnumTypeFoldableImpl! { } } -EnumLiftImpl! 
{ - impl<'a, 'tcx> Lift<'tcx> for traits::WhereClause<'a> { - type Lifted = traits::WhereClause<'tcx>; - (traits::WhereClause::Implemented)(trait_ref), - (traits::WhereClause::ProjectionEq)(projection), - (traits::WhereClause::TypeOutlives)(ty_outlives), - (traits::WhereClause::RegionOutlives)(region_outlives), - } -} - EnumTypeFoldableImpl! { impl<'tcx> TypeFoldable<'tcx> for traits::WellFormed<'tcx> { (traits::WellFormed::Trait)(trait_ref), @@ -551,14 +823,6 @@ EnumTypeFoldableImpl! { } } -EnumLiftImpl! { - impl<'a, 'tcx> Lift<'tcx> for traits::WellFormed<'a> { - type Lifted = traits::WellFormed<'tcx>; - (traits::WellFormed::Trait)(trait_ref), - (traits::WellFormed::Ty)(ty), - } -} - EnumTypeFoldableImpl! { impl<'tcx> TypeFoldable<'tcx> for traits::FromEnv<'tcx> { (traits::FromEnv::Trait)(trait_ref), @@ -566,14 +830,6 @@ EnumTypeFoldableImpl! { } } -EnumLiftImpl! { - impl<'a, 'tcx> Lift<'tcx> for traits::FromEnv<'a> { - type Lifted = traits::FromEnv<'tcx>; - (traits::FromEnv::Trait)(trait_ref), - (traits::FromEnv::Ty)(ty), - } -} - EnumTypeFoldableImpl! { impl<'tcx> TypeFoldable<'tcx> for traits::DomainGoal<'tcx> { (traits::DomainGoal::Holds)(wc), @@ -583,16 +839,6 @@ EnumTypeFoldableImpl! { } } -EnumLiftImpl! { - impl<'a, 'tcx> Lift<'tcx> for traits::DomainGoal<'a> { - type Lifted = traits::DomainGoal<'tcx>; - (traits::DomainGoal::Holds)(wc), - (traits::DomainGoal::WellFormed)(wf), - (traits::DomainGoal::FromEnv)(from_env), - (traits::DomainGoal::Normalize)(projection), - } -} - CloneTypeFoldableAndLiftImpls! { traits::QuantifierKind, } @@ -608,18 +854,6 @@ EnumTypeFoldableImpl! { } } -EnumLiftImpl! { - impl<'a, 'tcx> Lift<'tcx> for traits::GoalKind<'a> { - type Lifted = traits::GoalKind<'tcx>; - (traits::GoalKind::Implies)(hypotheses, goal), - (traits::GoalKind::And)(goal1, goal2), - (traits::GoalKind::Not)(goal), - (traits::GoalKind::DomainGoal)(domain_goal), - (traits::GoalKind::Quantified)(kind, goal), - (traits::GoalKind::CannotProve), - } -} - impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::List> { fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { let v = self.iter() @@ -674,31 +908,6 @@ BraceStructTypeFoldableImpl! { } where G: TypeFoldable<'tcx> } -impl<'a, 'tcx> Lift<'tcx> for traits::Environment<'a> { - type Lifted = traits::Environment<'tcx>; - fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option { - tcx.lift(&self.clauses).map(|clauses| { - traits::Environment { - clauses, - } - }) - } -} - -impl<'a, 'tcx, G: Lift<'tcx>> Lift<'tcx> for traits::InEnvironment<'a, G> { - type Lifted = traits::InEnvironment<'tcx, G::Lifted>; - fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option { - tcx.lift(&self.environment).and_then(|environment| { - tcx.lift(&self.goal).map(|goal| { - traits::InEnvironment { - environment, - goal, - } - }) - }) - } -} - impl<'tcx> TypeFoldable<'tcx> for traits::Clauses<'tcx> { fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { let v = self.iter() @@ -733,18 +942,6 @@ where } } -impl<'tcx, C> Lift<'tcx> for chalk_engine::ExClause -where - C: chalk_engine::context::Context + Clone, - C: traits::ExClauseLift<'tcx>, -{ - type Lifted = C::LiftedExClause; - - fn lift_to_tcx<'a, 'gcx>(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Option { - ::lift_ex_clause_to_tcx(self, tcx) - } -} - EnumTypeFoldableImpl! 
{ impl<'tcx, C> TypeFoldable<'tcx> for chalk_engine::DelayedLiteral { (chalk_engine::DelayedLiteral::CannotProve)(a), diff --git a/src/librustc/traits/util.rs b/src/librustc/traits/util.rs index 0b7526dd78..3a882506b4 100644 --- a/src/librustc/traits/util.rs +++ b/src/librustc/traits/util.rs @@ -103,11 +103,10 @@ pub fn elaborate_trait_ref<'cx, 'gcx, 'tcx>( pub fn elaborate_trait_refs<'cx, 'gcx, 'tcx>( tcx: TyCtxt<'cx, 'gcx, 'tcx>, - trait_refs: &[ty::PolyTraitRef<'tcx>]) + trait_refs: impl Iterator>) -> Elaborator<'cx, 'gcx, 'tcx> { - let predicates = trait_refs.iter() - .map(|trait_ref| trait_ref.to_predicate()) + let predicates = trait_refs.map(|trait_ref| trait_ref.to_predicate()) .collect(); elaborate_predicates(tcx, predicates) } @@ -201,8 +200,10 @@ impl<'cx, 'gcx, 'tcx> Elaborator<'cx, 'gcx, 'tcx> { } let visited = &mut self.visited; + let mut components = smallvec![]; + tcx.push_outlives_components(ty_max, &mut components); self.stack.extend( - tcx.outlives_components(ty_max) + components .into_iter() .filter_map(|component| match component { Component::Region(r) => if r.is_late_bound() { @@ -271,7 +272,7 @@ pub fn supertraits<'cx, 'gcx, 'tcx>(tcx: TyCtxt<'cx, 'gcx, 'tcx>, } pub fn transitive_bounds<'cx, 'gcx, 'tcx>(tcx: TyCtxt<'cx, 'gcx, 'tcx>, - bounds: &[ty::PolyTraitRef<'tcx>]) + bounds: impl Iterator>) -> Supertraits<'cx, 'gcx, 'tcx> { elaborate_trait_refs(tcx, bounds).filter_to_traits() @@ -334,7 +335,7 @@ impl FilterToTraits { } } -impl<'tcx,I:Iterator>> Iterator for FilterToTraits { +impl<'tcx, I: Iterator>> Iterator for FilterToTraits { type Item = ty::PolyTraitRef<'tcx>; fn next(&mut self) -> Option> { diff --git a/src/librustc/ty/_match.rs b/src/librustc/ty/_match.rs index c9b0e97c9b..d20b6d3619 100644 --- a/src/librustc/ty/_match.rs +++ b/src/librustc/ty/_match.rs @@ -34,7 +34,7 @@ pub struct Match<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { impl<'a, 'gcx, 'tcx> Match<'a, 'gcx, 'tcx> { pub fn new(tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Match<'a, 'gcx, 'tcx> { - Match { tcx: tcx } + Match { tcx } } } diff --git a/src/librustc/ty/adjustment.rs b/src/librustc/ty/adjustment.rs index 3263da8fda..83521c5f72 100644 --- a/src/librustc/ty/adjustment.rs +++ b/src/librustc/ty/adjustment.rs @@ -48,7 +48,7 @@ use ty::subst::Substs; /// stored in `unsize` is `Foo<[i32]>`, we don't store any further detail about /// the underlying conversions from `[i32; 4]` to `[i32]`. /// -/// 3. Coercing a `Box` to `Box` is an interesting special case. In +/// 3. Coercing a `Box` to `Box` is an interesting special case. In /// that case, we have the pointer we need coming in, so there are no /// autoderefs, and no autoref. Instead we just do the `Unsize` transformation. 
/// At some point, of course, `Box` should move out of the compiler, in which diff --git a/src/librustc/ty/constness.rs b/src/librustc/ty/constness.rs index 47aea7a5f0..e32913b890 100644 --- a/src/librustc/ty/constness.rs +++ b/src/librustc/ty/constness.rs @@ -66,7 +66,7 @@ impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> { } } else { // users enabling the `const_fn` feature gate can do what they want - !self.sess.features_untracked().const_fn + !self.features().const_fn } } } diff --git a/src/librustc/ty/context.rs b/src/librustc/ty/context.rs index f3fe53444b..42a4de1682 100644 --- a/src/librustc/ty/context.rs +++ b/src/librustc/ty/context.rs @@ -45,7 +45,7 @@ use ty::RegionKind; use ty::{TyVar, TyVid, IntVar, IntVid, FloatVar, FloatVid}; use ty::TyKind::*; use ty::GenericParamDefKind; -use ty::layout::{LayoutDetails, TargetDataLayout}; +use ty::layout::{LayoutDetails, TargetDataLayout, VariantIdx}; use ty::query; use ty::steal::Steal; use ty::BindingMode; @@ -53,6 +53,7 @@ use ty::CanonicalTy; use ty::CanonicalPolyFnSig; use util::nodemap::{DefIdMap, DefIdSet, ItemLocalMap}; use util::nodemap::{FxHashMap, FxHashSet}; +use rustc_data_structures::interner::HashInterner; use smallvec::SmallVec; use rustc_data_structures::stable_hasher::{HashStable, hash_stable_hashmap, StableHasher, StableHasherResult, @@ -113,7 +114,7 @@ pub struct GlobalArenas<'tcx> { const_allocs: TypedArena, } -type InternedSet<'tcx, T> = Lock>>; +type InternedSet<'tcx, T> = Lock, ()>>; pub struct CtxtInterners<'tcx> { /// The arena that types, regions, etc are allocated from @@ -155,6 +156,7 @@ impl<'gcx: 'tcx, 'tcx> CtxtInterners<'tcx> { } /// Intern a type + #[inline(never)] fn intern_ty( local: &CtxtInterners<'tcx>, global: &CtxtInterners<'gcx>, @@ -166,56 +168,45 @@ impl<'gcx: 'tcx, 'tcx> CtxtInterners<'tcx> { // determine that all contents are in the global tcx. // See comments on Lift for why we can't use that. if flags.flags.intersects(ty::TypeFlags::KEEP_IN_LOCAL_TCX) { - let mut interner = local.type_.borrow_mut(); - if let Some(&Interned(ty)) = interner.get(&st) { - return ty; - } + local.type_.borrow_mut().intern(st, |st| { + let ty_struct = TyS { + sty: st, + flags: flags.flags, + outer_exclusive_binder: flags.outer_exclusive_binder, + }; - let ty_struct = TyS { - sty: st, - flags: flags.flags, - outer_exclusive_binder: flags.outer_exclusive_binder, - }; + // Make sure we don't end up with inference + // types/regions in the global interner + if local as *const _ as usize == global as *const _ as usize { + bug!("Attempted to intern `{:?}` which contains \ + inference types/regions in the global type context", + &ty_struct); + } - // Make sure we don't end up with inference - // types/regions in the global interner - if local as *const _ as usize == global as *const _ as usize { - bug!("Attempted to intern `{:?}` which contains \ - inference types/regions in the global type context", - &ty_struct); - } - - // Don't be &mut TyS. 
- let ty: Ty<'tcx> = local.arena.alloc(ty_struct); - interner.insert(Interned(ty)); - ty + Interned(local.arena.alloc(ty_struct)) + }).0 } else { - let mut interner = global.type_.borrow_mut(); - if let Some(&Interned(ty)) = interner.get(&st) { - return ty; - } + global.type_.borrow_mut().intern(st, |st| { + let ty_struct = TyS { + sty: st, + flags: flags.flags, + outer_exclusive_binder: flags.outer_exclusive_binder, + }; - let ty_struct = TyS { - sty: st, - flags: flags.flags, - outer_exclusive_binder: flags.outer_exclusive_binder, - }; + // This is safe because all the types the ty_struct can point to + // already is in the global arena + let ty_struct: TyS<'gcx> = unsafe { + mem::transmute(ty_struct) + }; - // This is safe because all the types the ty_struct can point to - // already is in the global arena - let ty_struct: TyS<'gcx> = unsafe { - mem::transmute(ty_struct) - }; - - // Don't be &mut TyS. - let ty: Ty<'gcx> = global.arena.alloc(ty_struct); - interner.insert(Interned(ty)); - ty + Interned(global.arena.alloc(ty_struct)) + }).0 } } } pub struct CommonTypes<'tcx> { + pub unit: Ty<'tcx>, pub bool: Ty<'tcx>, pub char: Ty<'tcx>, pub isize: Ty<'tcx>, @@ -446,22 +437,22 @@ impl<'tcx> TypeckTables<'tcx> { pub fn empty(local_id_root: Option) -> TypeckTables<'tcx> { TypeckTables { local_id_root, - type_dependent_defs: ItemLocalMap(), - field_indices: ItemLocalMap(), - user_provided_tys: ItemLocalMap(), + type_dependent_defs: Default::default(), + field_indices: Default::default(), + user_provided_tys: Default::default(), user_provided_sigs: Default::default(), - node_types: ItemLocalMap(), - node_substs: ItemLocalMap(), - user_substs: ItemLocalMap(), - adjustments: ItemLocalMap(), - pat_binding_modes: ItemLocalMap(), - pat_adjustments: ItemLocalMap(), + node_types: Default::default(), + node_substs: Default::default(), + user_substs: Default::default(), + adjustments: Default::default(), + pat_binding_modes: Default::default(), + pat_adjustments: Default::default(), upvar_capture_map: Default::default(), - closure_kind_origins: ItemLocalMap(), - liberated_fn_sigs: ItemLocalMap(), - fru_field_types: ItemLocalMap(), - cast_kinds: ItemLocalMap(), - used_trait_imports: Lrc::new(DefIdSet()), + closure_kind_origins: Default::default(), + liberated_fn_sigs: Default::default(), + fru_field_types: Default::default(), + cast_kinds: Default::default(), + used_trait_imports: Lrc::new(Default::default()), tainted_by_errors: false, free_region_map: Default::default(), concrete_existential_types: Default::default(), @@ -789,7 +780,7 @@ impl<'a, 'gcx> HashStable> for TypeckTables<'gcx> { pat_adjustments.hash_stable(hcx, hasher); hash_stable_hashmap(hcx, hasher, upvar_capture_map, |up_var_id, hcx| { let ty::UpvarId { - var_id, + var_path, closure_expr_id } = *up_var_id; @@ -798,14 +789,14 @@ impl<'a, 'gcx> HashStable> for TypeckTables<'gcx> { let var_owner_def_id = DefId { krate: local_id_root.krate, - index: var_id.owner, + index: var_path.hir_id.owner, }; let closure_def_id = DefId { krate: local_id_root.krate, index: closure_expr_id.to_def_id().index, }; (hcx.def_path_hash(var_owner_def_id), - var_id.local_id, + var_path.hir_id.local_id, hcx.def_path_hash(closure_def_id)) }); @@ -823,22 +814,15 @@ impl<'a, 'gcx> HashStable> for TypeckTables<'gcx> { impl<'tcx> CommonTypes<'tcx> { fn new(interners: &CtxtInterners<'tcx>) -> CommonTypes<'tcx> { - // Ensure our type representation does not grow - #[cfg(target_pointer_width = "64")] - static_assert!(ASSERT_TY_KIND: ::std::mem::size_of::>() <= 24); - 
#[cfg(target_pointer_width = "64")] - static_assert!(ASSERT_TYS: ::std::mem::size_of::>() <= 32); - let mk = |sty| CtxtInterners::intern_ty(interners, interners, sty); let mk_region = |r| { - if let Some(r) = interners.region.borrow().get(&r) { - return r.0; - } - let r = interners.arena.alloc(r); - interners.region.borrow_mut().insert(Interned(r)); - &*r + interners.region.borrow_mut().intern(r, |r| { + Interned(interners.arena.alloc(r)) + }).0 }; + CommonTypes { + unit: mk(Tuple(List::empty())), bool: mk(Bool), char: mk(Char), never: mk(Never), @@ -882,7 +866,7 @@ pub struct FreeRegionInfo { /// various **compiler queries** that have been performed. See the /// [rustc guide] for more details. /// -/// [rustc guide]: https://rust-lang-nursery.github.io/rustc-guide/ty.html +/// [rustc guide]: https://rust-lang.github.io/rustc-guide/ty.html #[derive(Copy, Clone)] pub struct TyCtxt<'a, 'gcx: 'tcx, 'tcx: 'a> { gcx: &'a GlobalCtxt<'gcx>, @@ -891,6 +875,7 @@ pub struct TyCtxt<'a, 'gcx: 'tcx, 'tcx: 'a> { impl<'a, 'gcx, 'tcx> Deref for TyCtxt<'a, 'gcx, 'tcx> { type Target = &'a GlobalCtxt<'gcx>; + #[inline(always)] fn deref(&self) -> &Self::Target { &self.gcx } @@ -956,14 +941,14 @@ pub struct GlobalCtxt<'tcx> { /// Data layout specification for the current target. pub data_layout: TargetDataLayout, - stability_interner: Lock>, + stability_interner: Lock>, /// Stores the value of constants (and deduplicates the actual memory) - allocation_interner: Lock>, + allocation_interner: Lock>, pub alloc_map: Lock>, - layout_interner: Lock>, + layout_interner: Lock>, /// A general purpose channel to throw data out the back towards LLVM worker /// threads. @@ -1009,7 +994,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { pub fn alloc_adt_def(self, did: DefId, kind: AdtKind, - variants: Vec, + variants: IndexVec, repr: ReprOptions) -> &'gcx ty::AdtDef { let def = ty::AdtDef::new(self, did, kind, variants, repr); @@ -1046,50 +1031,29 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { self, alloc: Allocation, ) -> &'gcx Allocation { - let allocs = &mut self.allocation_interner.borrow_mut(); - if let Some(alloc) = allocs.get(&alloc) { - return alloc; - } - - let interned = self.global_arenas.const_allocs.alloc(alloc); - if let Some(prev) = allocs.replace(interned) { // insert into interner - bug!("Tried to overwrite interned Allocation: {:#?}", prev) - } - interned + self.allocation_interner.borrow_mut().intern(alloc, |alloc| { + self.global_arenas.const_allocs.alloc(alloc) + }) } /// Allocates a byte or string literal for `mir::interpret`, read-only pub fn allocate_bytes(self, bytes: &[u8]) -> interpret::AllocId { // create an allocation that just contains these bytes - let alloc = interpret::Allocation::from_byte_aligned_bytes(bytes); + let alloc = interpret::Allocation::from_byte_aligned_bytes(bytes, ()); let alloc = self.intern_const_alloc(alloc); self.alloc_map.lock().allocate(alloc) } pub fn intern_stability(self, stab: attr::Stability) -> &'gcx attr::Stability { - let mut stability_interner = self.stability_interner.borrow_mut(); - if let Some(st) = stability_interner.get(&stab) { - return st; - } - - let interned = self.global_interners.arena.alloc(stab); - if let Some(prev) = stability_interner.replace(interned) { - bug!("Tried to overwrite interned Stability: {:?}", prev) - } - interned + self.stability_interner.borrow_mut().intern(stab, |stab| { + self.global_interners.arena.alloc(stab) + }) } pub fn intern_layout(self, layout: LayoutDetails) -> &'gcx LayoutDetails { - let mut layout_interner = 
self.layout_interner.borrow_mut(); - if let Some(layout) = layout_interner.get(&layout) { - return layout; - } - - let interned = self.global_arenas.layout.alloc(layout); - if let Some(prev) = layout_interner.replace(interned) { - bug!("Tried to overwrite interned Layout: {:?}", prev) - } - interned + self.layout_interner.borrow_mut().intern(layout, |layout| { + self.global_arenas.layout.alloc(layout) + }) } /// Returns a range of the start/end indices specified with the @@ -1539,15 +1503,6 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { } } - /// Should we emit EndRegion MIR statements? These are consumed by - /// MIR borrowck, but not when NLL is used. They are also consumed - /// by the validation stuff. - pub fn emit_end_regions(self) -> bool { - self.sess.opts.debugging_opts.emit_end_regions || - self.sess.opts.debugging_opts.mir_emit_validate > 0 || - self.use_mir_borrowck() - } - #[inline] pub fn local_crate_exports_generics(self) -> bool { debug_assert!(self.sess.opts.share_generics()); @@ -2208,7 +2163,7 @@ macro_rules! sty_debug_print { }; $(let mut $variant = total;)* - for &Interned(t) in tcx.interners.type_.borrow().iter() { + for &Interned(t) in tcx.interners.type_.borrow().keys() { let variant = match t.sty { ty::Bool | ty::Char | ty::Int(..) | ty::Uint(..) | ty::Float(..) | ty::Str | ty::Never => continue, @@ -2251,8 +2206,8 @@ impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> { pub fn print_debug_stats(self) { sty_debug_print!( self, - Adt, Array, Slice, RawPtr, Ref, FnDef, FnPtr, - Generator, GeneratorWitness, Dynamic, Closure, Tuple, + Adt, Array, Slice, RawPtr, Ref, FnDef, FnPtr, Placeholder, + Generator, GeneratorWitness, Dynamic, Closure, Tuple, Bound, Param, Infer, UnnormalizedProjection, Projection, Opaque, Foreign); println!("Substs interner: #{}", self.interners.substs.borrow().len()); @@ -2267,6 +2222,13 @@ impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> { /// An entry in an interner. struct Interned<'tcx, T: 'tcx+?Sized>(&'tcx T); +impl<'tcx, T: 'tcx+?Sized> Clone for Interned<'tcx, T> { + fn clone(&self) -> Self { + Interned(self.0) + } +} +impl<'tcx, T: 'tcx+?Sized> Copy for Interned<'tcx, T> {} + // NB: An Interned compares and hashes as a sty. impl<'tcx> PartialEq for Interned<'tcx, TyS<'tcx>> { fn eq(&self, other: &Interned<'tcx, TyS<'tcx>>) -> bool { @@ -2387,37 +2349,28 @@ macro_rules! intern_method { // determine that all contents are in the global tcx. // See comments on Lift for why we can't use that. if ($keep_in_local_tcx)(&v) { - let mut interner = self.interners.$name.borrow_mut(); - if let Some(&Interned(v)) = interner.get(key) { - return v; - } + self.interners.$name.borrow_mut().intern_ref(key, || { + // Make sure we don't end up with inference + // types/regions in the global tcx. + if self.is_global() { + bug!("Attempted to intern `{:?}` which contains \ + inference types/regions in the global type context", + v); + } - // Make sure we don't end up with inference - // types/regions in the global tcx. 
- if self.is_global() { - bug!("Attempted to intern `{:?}` which contains \ - inference types/regions in the global type context", - v); - } - - let i = $alloc_method(&self.interners.arena, v); - interner.insert(Interned(i)); - i + Interned($alloc_method(&self.interners.arena, v)) + }).0 } else { - let mut interner = self.global_interners.$name.borrow_mut(); - if let Some(&Interned(v)) = interner.get(key) { - return v; - } - - // This transmutes $alloc<'tcx> to $alloc<'gcx> - let v = unsafe { - mem::transmute(v) - }; - let i: &$lt_tcx $ty = $alloc_method(&self.global_interners.arena, v); - // Cast to 'gcx - let i = unsafe { mem::transmute(i) }; - interner.insert(Interned(i)); - i + self.global_interners.$name.borrow_mut().intern_ref(key, || { + // This transmutes $alloc<'tcx> to $alloc<'gcx> + let v = unsafe { + mem::transmute(v) + }; + let i: &$lt_tcx $ty = $alloc_method(&self.global_interners.arena, v); + // Cast to 'gcx + let i = unsafe { mem::transmute(i) }; + Interned(i) + }).0 } } } @@ -2530,6 +2483,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { self.mk_fn_ptr(converted_sig) } + #[inline] pub fn mk_ty(&self, st: TyKind<'tcx>) -> Ty<'tcx> { CtxtInterners::intern_ty(&self.interners, &self.global_interners, st) } @@ -2563,19 +2517,23 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { } } + #[inline] pub fn mk_str(self) -> Ty<'tcx> { self.mk_ty(Str) } + #[inline] pub fn mk_static_str(self) -> Ty<'tcx> { self.mk_imm_ref(self.types.re_static, self.mk_str()) } + #[inline] pub fn mk_adt(self, def: &'tcx AdtDef, substs: &'tcx Substs<'tcx>) -> Ty<'tcx> { // take a copy of substs so that we own the vectors inside self.mk_ty(Adt(def, substs)) } + #[inline] pub fn mk_foreign(self, def_id: DefId) -> Ty<'tcx> { self.mk_ty(Foreign(def_id)) } @@ -2599,42 +2557,52 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { self.mk_ty(Adt(adt_def, substs)) } + #[inline] pub fn mk_ptr(self, tm: TypeAndMut<'tcx>) -> Ty<'tcx> { self.mk_ty(RawPtr(tm)) } + #[inline] pub fn mk_ref(self, r: Region<'tcx>, tm: TypeAndMut<'tcx>) -> Ty<'tcx> { self.mk_ty(Ref(r, tm.ty, tm.mutbl)) } + #[inline] pub fn mk_mut_ref(self, r: Region<'tcx>, ty: Ty<'tcx>) -> Ty<'tcx> { self.mk_ref(r, TypeAndMut {ty: ty, mutbl: hir::MutMutable}) } + #[inline] pub fn mk_imm_ref(self, r: Region<'tcx>, ty: Ty<'tcx>) -> Ty<'tcx> { self.mk_ref(r, TypeAndMut {ty: ty, mutbl: hir::MutImmutable}) } + #[inline] pub fn mk_mut_ptr(self, ty: Ty<'tcx>) -> Ty<'tcx> { self.mk_ptr(TypeAndMut {ty: ty, mutbl: hir::MutMutable}) } + #[inline] pub fn mk_imm_ptr(self, ty: Ty<'tcx>) -> Ty<'tcx> { self.mk_ptr(TypeAndMut {ty: ty, mutbl: hir::MutImmutable}) } + #[inline] pub fn mk_nil_ptr(self) -> Ty<'tcx> { self.mk_imm_ptr(self.mk_unit()) } + #[inline] pub fn mk_array(self, ty: Ty<'tcx>, n: u64) -> Ty<'tcx> { self.mk_ty(Array(ty, ty::Const::from_usize(self, n))) } + #[inline] pub fn mk_slice(self, ty: Ty<'tcx>) -> Ty<'tcx> { self.mk_ty(Slice(ty)) } + #[inline] pub fn intern_tup(self, ts: &[Ty<'tcx>]) -> Ty<'tcx> { self.mk_ty(Tuple(self.intern_type_list(ts))) } @@ -2643,10 +2611,12 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { iter.intern_with(|ts| self.mk_ty(Tuple(self.intern_type_list(ts)))) } + #[inline] pub fn mk_unit(self) -> Ty<'tcx> { - self.intern_tup(&[]) + self.types.unit } + #[inline] pub fn mk_diverging_default(self) -> Ty<'tcx> { if self.features().never_type { self.types.never @@ -2655,19 +2625,23 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { } } + #[inline] pub fn mk_bool(self) -> Ty<'tcx> { self.mk_ty(Bool) } + #[inline] pub fn mk_fn_def(self, def_id: 
DefId, substs: &'tcx Substs<'tcx>) -> Ty<'tcx> { self.mk_ty(FnDef(def_id, substs)) } + #[inline] pub fn mk_fn_ptr(self, fty: PolyFnSig<'tcx>) -> Ty<'tcx> { self.mk_ty(FnPtr(fty)) } + #[inline] pub fn mk_dynamic( self, obj: ty::Binder<&'tcx List>>, @@ -2676,6 +2650,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { self.mk_ty(Dynamic(obj, reg)) } + #[inline] pub fn mk_projection(self, item_def_id: DefId, substs: &'tcx Substs<'tcx>) @@ -2686,11 +2661,13 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { })) } + #[inline] pub fn mk_closure(self, closure_id: DefId, closure_substs: ClosureSubsts<'tcx>) -> Ty<'tcx> { self.mk_ty(Closure(closure_id, closure_substs)) } + #[inline] pub fn mk_generator(self, id: DefId, generator_substs: GeneratorSubsts<'tcx>, @@ -2699,32 +2676,39 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { self.mk_ty(Generator(id, generator_substs, movability)) } + #[inline] pub fn mk_generator_witness(self, types: ty::Binder<&'tcx List>>) -> Ty<'tcx> { self.mk_ty(GeneratorWitness(types)) } + #[inline] pub fn mk_var(self, v: TyVid) -> Ty<'tcx> { self.mk_infer(TyVar(v)) } + #[inline] pub fn mk_int_var(self, v: IntVid) -> Ty<'tcx> { self.mk_infer(IntVar(v)) } + #[inline] pub fn mk_float_var(self, v: FloatVid) -> Ty<'tcx> { self.mk_infer(FloatVar(v)) } + #[inline] pub fn mk_infer(self, it: InferTy) -> Ty<'tcx> { self.mk_ty(Infer(it)) } + #[inline] pub fn mk_ty_param(self, index: u32, name: InternedString) -> Ty<'tcx> { self.mk_ty(Param(ParamTy { idx: index, name: name })) } + #[inline] pub fn mk_self_type(self) -> Ty<'tcx> { self.mk_ty_param(0, keywords::SelfType.name().as_interned_str()) } @@ -2738,6 +2722,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { } } + #[inline] pub fn mk_opaque(self, def_id: DefId, substs: &'tcx Substs<'tcx>) -> Ty<'tcx> { self.mk_ty(Opaque(def_id, substs)) } diff --git a/src/librustc/ty/error.rs b/src/librustc/ty/error.rs index ed6e372fe7..90022a770c 100644 --- a/src/librustc/ty/error.rs +++ b/src/librustc/ty/error.rs @@ -53,8 +53,6 @@ pub enum TypeError<'tcx> { ProjectionMismatched(ExpectedFound), ProjectionBoundsLength(ExpectedFound), ExistentialMismatch(ExpectedFound<&'tcx ty::List>>), - - OldStyleLUB(Box>), } #[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Eq, Hash, Debug, Copy)] @@ -166,9 +164,6 @@ impl<'tcx> fmt::Display for TypeError<'tcx> { report_maybe_different(f, &format!("trait `{}`", values.expected), &format!("trait `{}`", values.found)) } - OldStyleLUB(ref err) => { - write!(f, "{}", err) - } } } } @@ -217,7 +212,8 @@ impl<'a, 'gcx, 'lcx, 'tcx> ty::TyS<'tcx> { ty::Infer(ty::TyVar(_)) => "inferred type".into(), ty::Infer(ty::IntVar(_)) => "integral variable".into(), ty::Infer(ty::FloatVar(_)) => "floating-point variable".into(), - ty::Infer(ty::BoundTy(_)) | + ty::Placeholder(..) => "placeholder type".into(), + ty::Bound(..) => "bound type".into(), ty::Infer(ty::FreshTy(_)) => "fresh type".into(), ty::Infer(ty::FreshIntTy(_)) => "fresh integral type".into(), ty::Infer(ty::FreshFloatTy(_)) => "fresh floating-point type".into(), @@ -266,12 +262,6 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { } } }, - OldStyleLUB(err) => { - db.note("this was previously accepted by the compiler but has been phased out"); - db.note("for more information, see https://github.com/rust-lang/rust/issues/45852"); - - self.note_and_explain_type_err(db, &err, sp); - } CyclicTy(ty) => { // Watch out for various cases of cyclic types and try to explain. 
if ty.is_closure() || ty.is_generator() { diff --git a/src/librustc/ty/fast_reject.rs b/src/librustc/ty/fast_reject.rs index e6aaf8b1bb..8304e36381 100644 --- a/src/librustc/ty/fast_reject.rs +++ b/src/librustc/ty/fast_reject.rs @@ -122,7 +122,7 @@ pub fn simplify_type<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, ty::Foreign(def_id) => { Some(ForeignSimplifiedType(def_id)) } - ty::Infer(_) | ty::Error => None, + ty::Placeholder(..) | ty::Bound(..) | ty::Infer(_) | ty::Error => None, } } diff --git a/src/librustc/ty/flags.rs b/src/librustc/ty/flags.rs index a7b21688fb..1ea7e27c0d 100644 --- a/src/librustc/ty/flags.rs +++ b/src/librustc/ty/flags.rs @@ -115,15 +115,21 @@ impl FlagComputation { self.add_substs(&substs.substs); } + &ty::Bound(debruijn, _) => { + self.add_binder(debruijn); + } + + &ty::Placeholder(..) => { + self.add_flags(TypeFlags::HAS_TY_PLACEHOLDER); + } + &ty::Infer(infer) => { self.add_flags(TypeFlags::HAS_FREE_LOCAL_NAMES); // it might, right? self.add_flags(TypeFlags::HAS_TY_INFER); match infer { ty::FreshTy(_) | ty::FreshIntTy(_) | - ty::FreshFloatTy(_) | - ty::BoundTy(_) => { - self.add_flags(TypeFlags::HAS_CANONICAL_VARS); + ty::FreshFloatTy(_) => { } ty::TyVar(_) | @@ -141,7 +147,7 @@ impl FlagComputation { &ty::Projection(ref data) => { // currently we can't normalize projections that // include bound regions, so track those separately. - if !data.has_escaping_regions() { + if !data.has_escaping_bound_vars() { self.add_flags(TypeFlags::HAS_NORMALIZABLE_PROJECTION); } self.add_flags(TypeFlags::HAS_PROJECTION); diff --git a/src/librustc/ty/fold.rs b/src/librustc/ty/fold.rs index 80dfd263af..20f64597b7 100644 --- a/src/librustc/ty/fold.rs +++ b/src/librustc/ty/fold.rs @@ -67,22 +67,22 @@ pub trait TypeFoldable<'tcx>: fmt::Debug + Clone { /// bound by `binder` or bound by some binder outside of `binder`. /// If `binder` is `ty::INNERMOST`, this indicates whether /// there are any late-bound regions that appear free. - fn has_regions_bound_at_or_above(&self, binder: ty::DebruijnIndex) -> bool { - self.visit_with(&mut HasEscapingRegionsVisitor { outer_index: binder }) + fn has_vars_bound_at_or_above(&self, binder: ty::DebruijnIndex) -> bool { + self.visit_with(&mut HasEscapingVarsVisitor { outer_index: binder }) } /// True if this `self` has any regions that escape `binder` (and /// hence are not bound by it). 
- fn has_regions_bound_above(&self, binder: ty::DebruijnIndex) -> bool { - self.has_regions_bound_at_or_above(binder.shifted_in(1)) + fn has_vars_bound_above(&self, binder: ty::DebruijnIndex) -> bool { + self.has_vars_bound_at_or_above(binder.shifted_in(1)) } - fn has_escaping_regions(&self) -> bool { - self.has_regions_bound_at_or_above(ty::INNERMOST) + fn has_escaping_bound_vars(&self) -> bool { + self.has_vars_bound_at_or_above(ty::INNERMOST) } fn has_type_flags(&self, flags: TypeFlags) -> bool { - self.visit_with(&mut HasTypeFlagsVisitor { flags: flags }) + self.visit_with(&mut HasTypeFlagsVisitor { flags }) } fn has_projections(&self) -> bool { self.has_type_flags(TypeFlags::HAS_PROJECTION) @@ -102,14 +102,14 @@ pub trait TypeFoldable<'tcx>: fmt::Debug + Clone { fn needs_infer(&self) -> bool { self.has_type_flags(TypeFlags::HAS_TY_INFER | TypeFlags::HAS_RE_INFER) } - fn has_skol(&self) -> bool { - self.has_type_flags(TypeFlags::HAS_RE_SKOL) + fn has_placeholders(&self) -> bool { + self.has_type_flags(TypeFlags::HAS_RE_PLACEHOLDER | TypeFlags::HAS_TY_PLACEHOLDER) } fn needs_subst(&self) -> bool { self.has_type_flags(TypeFlags::NEEDS_SUBST) } - fn has_re_skol(&self) -> bool { - self.has_type_flags(TypeFlags::HAS_RE_SKOL) + fn has_re_placeholders(&self) -> bool { + self.has_type_flags(TypeFlags::HAS_RE_PLACEHOLDER) } fn has_closure_types(&self) -> bool { self.has_type_flags(TypeFlags::HAS_TY_CLOSURE) @@ -374,6 +374,7 @@ pub struct RegionFolder<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { } impl<'a, 'gcx, 'tcx> RegionFolder<'a, 'gcx, 'tcx> { + #[inline] pub fn new( tcx: TyCtxt<'a, 'gcx, 'tcx>, skipped_regions: &'a mut bool, @@ -416,11 +417,10 @@ impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for RegionFolder<'a, 'gcx, 'tcx> { } /////////////////////////////////////////////////////////////////////////// -// Late-bound region replacer +// Bound vars replacer -// Replaces the escaping regions in a type. - -struct RegionReplacer<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { +/// Replaces the escaping bound vars (late bound regions or bound types) in a type. +struct BoundVarReplacer<'a, 'gcx: 'a + 'tcx, 'tcx: 'a> { tcx: TyCtxt<'a, 'gcx, 'tcx>, /// As with `RegionFolder`, represents the index of a binder *just outside* @@ -428,7 +428,82 @@ struct RegionReplacer<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { current_index: ty::DebruijnIndex, fld_r: &'a mut (dyn FnMut(ty::BoundRegion) -> ty::Region<'tcx> + 'a), - map: BTreeMap> + fld_t: &'a mut (dyn FnMut(ty::BoundTy) -> ty::Ty<'tcx> + 'a), +} + +impl<'a, 'gcx, 'tcx> BoundVarReplacer<'a, 'gcx, 'tcx> { + fn new( + tcx: TyCtxt<'a, 'gcx, 'tcx>, + fld_r: &'a mut F, + fld_t: &'a mut G + ) -> Self + where F: FnMut(ty::BoundRegion) -> ty::Region<'tcx>, + G: FnMut(ty::BoundTy) -> ty::Ty<'tcx> + { + BoundVarReplacer { + tcx, + current_index: ty::INNERMOST, + fld_r, + fld_t, + } + } +} + +impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for BoundVarReplacer<'a, 'gcx, 'tcx> { + fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'tcx> { self.tcx } + + fn fold_binder>(&mut self, t: &ty::Binder) -> ty::Binder { + self.current_index.shift_in(1); + let t = t.super_fold_with(self); + self.current_index.shift_out(1); + t + } + + fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> { + match t.sty { + ty::Bound(debruijn, bound_ty) => { + if debruijn == self.current_index { + let fld_t = &mut self.fld_t; + let ty = fld_t(bound_ty); + ty::fold::shift_vars( + self.tcx, + &ty, + self.current_index.as_u32() + ) + } else { + t + } + } + _ => { + if !t.has_vars_bound_at_or_above(self.current_index) { + // Nothing more to substitute. 
+ t + } else { + t.super_fold_with(self) + } + } + } + } + + fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> { + match *r { + ty::ReLateBound(debruijn, br) if debruijn == self.current_index => { + let fld_r = &mut self.fld_r; + let region = fld_r(br); + if let ty::ReLateBound(debruijn1, br) = *region { + // If the callback returns a late-bound region, + // that region should always use the INNERMOST + // debruijn index. Then we adjust it to the + // correct depth. + assert_eq!(debruijn1, ty::INNERMOST); + self.tcx.mk_region(ty::ReLateBound(debruijn, br)) + } else { + region + } + } + _ => r + } + } } impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { @@ -440,16 +515,63 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { /// same `BoundRegion` will reuse the previous result. A map is /// returned at the end with each bound region and the free region /// that replaced it. - pub fn replace_late_bound_regions(self, + /// + /// This method only replaces late bound regions and the result may still + /// contain escaping bound types. + pub fn replace_late_bound_regions( + self, value: &Binder, - mut f: F) - -> (T, BTreeMap>) - where F : FnMut(ty::BoundRegion) -> ty::Region<'tcx>, - T : TypeFoldable<'tcx>, + fld_r: F + ) -> (T, BTreeMap>) + where F: FnMut(ty::BoundRegion) -> ty::Region<'tcx>, + T: TypeFoldable<'tcx> { - let mut replacer = RegionReplacer::new(self, &mut f); - let result = value.skip_binder().fold_with(&mut replacer); - (result, replacer.map) + // identity for bound types + let fld_t = |bound_ty| self.mk_ty(ty::Bound(ty::INNERMOST, bound_ty)); + self.replace_escaping_bound_vars(value.skip_binder(), fld_r, fld_t) + } + + /// Replace all escaping bound vars. The `fld_r` closure replaces escaping + /// bound regions while the `fld_t` closure replaces escaping bound types. + pub fn replace_escaping_bound_vars( + self, + value: &T, + mut fld_r: F, + mut fld_t: G + ) -> (T, BTreeMap>) + where F: FnMut(ty::BoundRegion) -> ty::Region<'tcx>, + G: FnMut(ty::BoundTy) -> ty::Ty<'tcx>, + T: TypeFoldable<'tcx> + { + let mut map = BTreeMap::new(); + + if !value.has_escaping_bound_vars() { + (value.clone(), map) + } else { + let mut real_fld_r = |br| { + *map.entry(br).or_insert_with(|| fld_r(br)) + }; + + let mut replacer = BoundVarReplacer::new(self, &mut real_fld_r, &mut fld_t); + let result = value.fold_with(&mut replacer); + (result, map) + } + } + + /// Replace all types or regions bound by the given `Binder`. The `fld_r` + /// closure replaces bound regions while the `fld_t` closure replaces bound + /// types. + pub fn replace_bound_vars( + self, + value: &Binder, + fld_r: F, + fld_t: G + ) -> (T, BTreeMap>) + where F: FnMut(ty::BoundRegion) -> ty::Region<'tcx>, + G: FnMut(ty::BoundTy) -> ty::Ty<'tcx>, + T: TypeFoldable<'tcx> + { + self.replace_escaping_bound_vars(value.skip_binder(), fld_r, fld_t) } /// Replace any late-bound regions bound in `value` with @@ -549,21 +671,40 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { } } -impl<'a, 'gcx, 'tcx> RegionReplacer<'a, 'gcx, 'tcx> { - fn new(tcx: TyCtxt<'a, 'gcx, 'tcx>, fld_r: &'a mut F) - -> RegionReplacer<'a, 'gcx, 'tcx> - where F : FnMut(ty::BoundRegion) -> ty::Region<'tcx> - { - RegionReplacer { +/////////////////////////////////////////////////////////////////////////// +// Shifter +// +// Shifts the De Bruijn indices on all escaping bound vars by a +// fixed amount. Useful in substitution or when otherwise introducing +// a binding level that is not intended to capture the existing bound +// vars. 
See comment on `shift_vars_through_binders` method in +// `subst.rs` for more details. + +#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] +enum Direction { + In, + Out, +} + +struct Shifter<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { + tcx: TyCtxt<'a, 'gcx, 'tcx>, + current_index: ty::DebruijnIndex, + amount: u32, + direction: Direction, +} + +impl Shifter<'a, 'gcx, 'tcx> { + pub fn new(tcx: TyCtxt<'a, 'gcx, 'tcx>, amount: u32, direction: Direction) -> Self { + Shifter { tcx, current_index: ty::INNERMOST, - fld_r, - map: BTreeMap::default() + amount, + direction, } } } -impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for RegionReplacer<'a, 'gcx, 'tcx> { +impl TypeFolder<'gcx, 'tcx> for Shifter<'a, 'gcx, 'tcx> { fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'tcx> { self.tcx } fn fold_binder>(&mut self, t: &ty::Binder) -> ty::Binder { @@ -573,64 +714,59 @@ impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for RegionReplacer<'a, 'gcx, 'tcx> { t } - fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> { - if !t.has_regions_bound_at_or_above(self.current_index) { - return t; - } - - t.super_fold_with(self) - } - fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> { match *r { - ty::ReLateBound(debruijn, br) if debruijn == self.current_index => { - let fld_r = &mut self.fld_r; - let region = *self.map.entry(br).or_insert_with(|| fld_r(br)); - if let ty::ReLateBound(debruijn1, br) = *region { - // If the callback returns a late-bound region, - // that region should always use the INNERMOST - // debruijn index. Then we adjust it to the - // correct depth. - assert_eq!(debruijn1, ty::INNERMOST); - self.tcx.mk_region(ty::ReLateBound(debruijn, br)) + ty::ReLateBound(debruijn, br) => { + if self.amount == 0 || debruijn < self.current_index { + r } else { - region + let debruijn = match self.direction { + Direction::In => debruijn.shifted_in(self.amount), + Direction::Out => { + assert!(debruijn.as_u32() >= self.amount); + debruijn.shifted_out(self.amount) + } + }; + let shifted = ty::ReLateBound(debruijn, br); + self.tcx.mk_region(shifted) } } _ => r } } -} -/////////////////////////////////////////////////////////////////////////// -// Region shifter -// -// Shifts the De Bruijn indices on all escaping bound regions by a -// fixed amount. Useful in substitution or when otherwise introducing -// a binding level that is not intended to capture the existing bound -// regions. See comment on `shift_regions_through_binders` method in -// `subst.rs` for more details. 
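// Illustrative aside (not rustc's API): the new `Shifter` adjusts the De
// Bruijn indices of escaping bound vars (regions and types alike) by a fixed
// amount, leaving anything bound inside the shifted term alone. The same
// arithmetic on a toy term language, with `Term` and `shift_in` as assumed
// names:

/// Toy lambda-calculus terms using De Bruijn indices.
#[derive(Clone, Debug, PartialEq)]
enum Term {
    /// A variable; the index counts binders between the use and its binder.
    Var(u32),
    /// A binder introducing one variable.
    Lam(Box<Term>),
    App(Box<Term>, Box<Term>),
}

/// Shift every variable that escapes `cutoff` enclosing binders by `amount`
/// (the analogue of `Shifter` with `Direction::In`).
fn shift_in(term: &Term, amount: u32, cutoff: u32) -> Term {
    match term {
        Term::Var(i) if *i >= cutoff => Term::Var(*i + amount), // escaping: adjust
        Term::Var(i) => Term::Var(*i),                          // bound here: keep
        // Entering a binder raises the cutoff, like `current_index.shift_in(1)`.
        Term::Lam(body) => Term::Lam(Box::new(shift_in(body, amount, cutoff + 1))),
        Term::App(f, a) => Term::App(
            Box::new(shift_in(f, amount, cutoff)),
            Box::new(shift_in(a, amount, cutoff)),
        ),
    }
}

fn main() {
    // \x. (x y) where y escapes the lambda: Lam(App(Var 0, Var 1)).
    let t = Term::Lam(Box::new(Term::App(
        Box::new(Term::Var(0)),
        Box::new(Term::Var(1)),
    )));
    // Moving `t` under one extra binder shifts only the escaping Var(1).
    let shifted = shift_in(&t, 1, 0);
    assert_eq!(
        shifted,
        Term::Lam(Box::new(Term::App(
            Box::new(Term::Var(0)),
            Box::new(Term::Var(2)),
        )))
    );
}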
+ fn fold_ty(&mut self, ty: ty::Ty<'tcx>) -> ty::Ty<'tcx> { + match ty.sty { + ty::Bound(debruijn, bound_ty) => { + if self.amount == 0 || debruijn < self.current_index { + ty + } else { + let debruijn = match self.direction { + Direction::In => debruijn.shifted_in(self.amount), + Direction::Out => { + assert!(debruijn.as_u32() >= self.amount); + debruijn.shifted_out(self.amount) + } + }; + self.tcx.mk_ty( + ty::Bound(debruijn, bound_ty) + ) + } + } -pub fn shift_region(region: ty::RegionKind, amount: u32) -> ty::RegionKind { - match region { - ty::ReLateBound(debruijn, br) => { - ty::ReLateBound(debruijn.shifted_in(amount), br) - } - _ => { - region + _ => ty.super_fold_with(self), } } } -pub fn shift_region_ref<'a, 'gcx, 'tcx>( +pub fn shift_region<'a, 'gcx, 'tcx>( tcx: TyCtxt<'a, 'gcx, 'tcx>, region: ty::Region<'tcx>, - amount: u32) - -> ty::Region<'tcx> -{ + amount: u32 +) -> ty::Region<'tcx> { match region { - &ty::ReLateBound(debruijn, br) if amount > 0 => { - tcx.mk_region(ty::ReLateBound(debruijn.shifted_in(amount), br)) + ty::ReLateBound(debruijn, br) if amount > 0 => { + tcx.mk_region(ty::ReLateBound(debruijn.shifted_in(amount), *br)) } _ => { region @@ -638,20 +774,30 @@ pub fn shift_region_ref<'a, 'gcx, 'tcx>( } } -pub fn shift_regions<'a, 'gcx, 'tcx, T>(tcx: TyCtxt<'a, 'gcx, 'tcx>, - amount: u32, - value: &T) -> T - where T: TypeFoldable<'tcx> -{ - debug!("shift_regions(value={:?}, amount={})", +pub fn shift_vars<'a, 'gcx, 'tcx, T>( + tcx: TyCtxt<'a, 'gcx, 'tcx>, + value: &T, + amount: u32 +) -> T where T: TypeFoldable<'tcx> { + debug!("shift_vars(value={:?}, amount={})", value, amount); - value.fold_with(&mut RegionFolder::new(tcx, &mut false, &mut |region, _current_depth| { - shift_region_ref(tcx, region, amount) - })) + value.fold_with(&mut Shifter::new(tcx, amount, Direction::In)) } -/// An "escaping region" is a bound region whose binder is not part of `t`. +pub fn shift_out_vars<'a, 'gcx, 'tcx, T>( + tcx: TyCtxt<'a, 'gcx, 'tcx>, + value: &T, + amount: u32 +) -> T where T: TypeFoldable<'tcx> { + debug!("shift_out_vars(value={:?}, amount={})", + value, amount); + + value.fold_with(&mut Shifter::new(tcx, amount, Direction::Out)) +} + +/// An "escaping var" is a bound var whose binder is not part of `t`. A bound var can be a +/// bound region or a bound type. /// /// So, for example, consider a type like the following, which has two binders: /// @@ -663,24 +809,24 @@ pub fn shift_regions<'a, 'gcx, 'tcx, T>(tcx: TyCtxt<'a, 'gcx, 'tcx>, /// binders of both `'a` and `'b` are part of the type itself. However, if we consider the *inner /// fn type*, that type has an escaping region: `'a`. /// -/// Note that what I'm calling an "escaping region" is often just called a "free region". However, -/// we already use the term "free region". It refers to the regions that we use to represent bound -/// regions on a fn definition while we are typechecking its body. +/// Note that what I'm calling an "escaping var" is often just called a "free var". However, +/// we already use the term "free var". It refers to the regions or types that we use to represent +/// bound regions or type params on a fn definition while we are type checking its body. /// /// To clarify, conceptually there is no particular difference between -/// an "escaping" region and a "free" region. However, there is a big +/// an "escaping" var and a "free" var. However, there is a big /// difference in practice. 
Basically, when "entering" a binding /// level, one is generally required to do some sort of processing to -/// a bound region, such as replacing it with a fresh/placeholder -/// region, or making an entry in the environment to represent the -/// scope to which it is attached, etc. An escaping region represents -/// a bound region for which this processing has not yet been done. -struct HasEscapingRegionsVisitor { +/// a bound var, such as replacing it with a fresh/placeholder +/// var, or making an entry in the environment to represent the +/// scope to which it is attached, etc. An escaping var represents +/// a bound var for which this processing has not yet been done. +struct HasEscapingVarsVisitor { /// Anything bound by `outer_index` or "above" is escaping outer_index: ty::DebruijnIndex, } -impl<'tcx> TypeVisitor<'tcx> for HasEscapingRegionsVisitor { +impl<'tcx> TypeVisitor<'tcx> for HasEscapingVarsVisitor { fn visit_binder>(&mut self, t: &Binder) -> bool { self.outer_index.shift_in(1); let result = t.super_visit_with(self); @@ -693,7 +839,7 @@ impl<'tcx> TypeVisitor<'tcx> for HasEscapingRegionsVisitor { // `outer_index`, that means that `t` contains some content // bound at `outer_index` or above (because // `outer_exclusive_binder` is always 1 higher than the - // content in `t`). Therefore, `t` has some escaping regions. + // content in `t`). Therefore, `t` has some escaping vars. t.outer_exclusive_binder > self.outer_index } @@ -741,7 +887,7 @@ struct LateBoundRegionsCollector { /// If true, we only want regions that are known to be /// "constrained" when you equate this type with another type. In - /// partcular, if you have e.g. `&'a u32` and `&'b u32`, equating + /// particular, if you have e.g. `&'a u32` and `&'b u32`, equating /// them constraints `'a == 'b`. But if you have `<&'a u32 as /// Trait>::Foo` and `<&'b u32 as Trait>::Foo`, normalizing those /// types may mean that `'a` and `'b` don't appear in the results, diff --git a/src/librustc/ty/inhabitedness/mod.rs b/src/librustc/ty/inhabitedness/mod.rs index 56fe479ffc..721d5e14cc 100644 --- a/src/librustc/ty/inhabitedness/mod.rs +++ b/src/librustc/ty/inhabitedness/mod.rs @@ -167,23 +167,16 @@ impl<'a, 'gcx, 'tcx> VariantDef { substs: &'tcx Substs<'tcx>, adt_kind: AdtKind) -> DefIdForest { - match adt_kind { - AdtKind::Union => { - DefIdForest::intersection(tcx, self.fields.iter().map(|f| { - f.uninhabited_from(visited, tcx, substs, false) - })) - }, - AdtKind::Struct => { - DefIdForest::union(tcx, self.fields.iter().map(|f| { - f.uninhabited_from(visited, tcx, substs, false) - })) - }, - AdtKind::Enum => { - DefIdForest::union(tcx, self.fields.iter().map(|f| { - f.uninhabited_from(visited, tcx, substs, true) - })) - }, - } + let is_enum = match adt_kind { + // For now, `union`s are never considered uninhabited. + // The precise semantics of inhabitedness with respect to unions is currently undecided. 
+ AdtKind::Union => return DefIdForest::empty(), + AdtKind::Enum => true, + AdtKind::Struct => false, + }; + DefIdForest::union(tcx, self.fields.iter().map(|f| { + f.uninhabited_from(visited, tcx, substs, is_enum) + })) } } @@ -194,8 +187,8 @@ impl<'a, 'gcx, 'tcx> FieldDef { visited: &mut FxHashMap>>, tcx: TyCtxt<'a, 'gcx, 'tcx>, substs: &'tcx Substs<'tcx>, - is_enum: bool) -> DefIdForest - { + is_enum: bool, + ) -> DefIdForest { let mut data_uninhabitedness = move || { self.ty(tcx, substs).uninhabited_from(visited, tcx) }; @@ -253,14 +246,16 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { let substs_set = visited.get_mut(&def.did).unwrap(); substs_set.remove(substs); ret - }, + } Never => DefIdForest::full(tcx), + Tuple(ref tys) => { DefIdForest::union(tcx, tys.iter().map(|ty| { ty.uninhabited_from(visited, tcx) })) - }, + } + Array(ty, len) => { match len.assert_usize(tcx) { // If the array is definitely non-empty, it's uninhabited if @@ -269,9 +264,13 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { _ => DefIdForest::empty() } } - Ref(_, ty, _) => { - ty.uninhabited_from(visited, tcx) - } + + // References to uninitialised memory is valid for any type, including + // uninhabited types, in unsafe code, so we treat all references as + // inhabited. + // The precise semantics of inhabitedness with respect to references is currently + // undecided. + Ref(..) => DefIdForest::empty(), _ => DefIdForest::empty(), } diff --git a/src/librustc/ty/instance.rs b/src/librustc/ty/instance.rs index 3d205215d6..411a6e7e62 100644 --- a/src/librustc/ty/instance.rs +++ b/src/librustc/ty/instance.rs @@ -8,13 +8,15 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use hir::Unsafety; use hir::def_id::DefId; -use ty::{self, Ty, TypeFoldable, Substs, TyCtxt}; +use ty::{self, Ty, PolyFnSig, TypeFoldable, Substs, TyCtxt}; use traits; use rustc_target::spec::abi::Abi; use util::ppaux; use std::fmt; +use std::iter; #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)] pub struct Instance<'tcx> { @@ -27,6 +29,9 @@ pub enum InstanceDef<'tcx> { Item(DefId), Intrinsic(DefId), + /// `::method` where `method` receives unsizeable `self: Self`. + VtableShim(DefId), + /// \::call_* /// def-id is FnTrait::call_* FnPtrShim(DefId, Ty<'tcx>), @@ -56,6 +61,65 @@ impl<'a, 'tcx> Instance<'tcx> { &ty, ) } + + fn fn_sig_noadjust(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> PolyFnSig<'tcx> { + let ty = self.ty(tcx); + match ty.sty { + ty::FnDef(..) | + // Shims currently have type FnPtr. Not sure this should remain. 
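// Illustrative aside (simplified, not rustc's representation): the
// inhabitedness hunks above settle on a conservative rule. Unions are never
// treated as uninhabited, references are always treated as inhabited, a
// struct/tuple is uninhabited if any field is, and an enum is uninhabited
// only if every variant is. A self-contained sketch, with `SimpleTy` and
// `is_uninhabited` as assumed names:

/// Drastically simplified type grammar, just enough to show the rule.
enum SimpleTy {
    Never,                    // like `!`
    Unit,                     // like `()`
    Ref(Box<SimpleTy>),       // like `&T`
    Tuple(Vec<SimpleTy>),     // struct/tuple: all fields must be inhabited
    Enum(Vec<Vec<SimpleTy>>), // variants, each a list of field types
    Union(Vec<SimpleTy>),     // union fields
}

fn is_uninhabited(ty: &SimpleTy) -> bool {
    match ty {
        SimpleTy::Never => true,
        SimpleTy::Unit => false,
        // Conservative choices mirroring the comments in the patch above:
        SimpleTy::Ref(_) => false,   // references always count as inhabited
        SimpleTy::Union(_) => false, // unions never count as uninhabited
        SimpleTy::Tuple(fields) => fields.iter().any(is_uninhabited),
        SimpleTy::Enum(variants) => variants
            .iter()
            .all(|fields| fields.iter().any(is_uninhabited)),
    }
}

fn main() {
    use SimpleTy::*;
    assert!(is_uninhabited(&Tuple(vec![Unit, Never])));   // a field of type `!`
    assert!(!is_uninhabited(&Ref(Box::new(Never))));      // `&!` treated as inhabited
    assert!(!is_uninhabited(&Union(vec![Never])));        // unions treated as inhabited
    assert!(is_uninhabited(&Enum(vec![vec![Never]])));    // every variant uninhabited
    assert!(!is_uninhabited(&Enum(vec![vec![Never], vec![]]))); // fieldless variant is fine
}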
+ ty::FnPtr(_) => ty.fn_sig(tcx), + ty::Closure(def_id, substs) => { + let sig = substs.closure_sig(def_id, tcx); + + let env_ty = tcx.closure_env_ty(def_id, substs).unwrap(); + sig.map_bound(|sig| tcx.mk_fn_sig( + iter::once(*env_ty.skip_binder()).chain(sig.inputs().iter().cloned()), + sig.output(), + sig.variadic, + sig.unsafety, + sig.abi + )) + } + ty::Generator(def_id, substs, _) => { + let sig = substs.poly_sig(def_id, tcx); + + let env_region = ty::ReLateBound(ty::INNERMOST, ty::BrEnv); + let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty); + + sig.map_bound(|sig| { + let state_did = tcx.lang_items().gen_state().unwrap(); + let state_adt_ref = tcx.adt_def(state_did); + let state_substs = tcx.intern_substs(&[ + sig.yield_ty.into(), + sig.return_ty.into(), + ]); + let ret_ty = tcx.mk_adt(state_adt_ref, state_substs); + + tcx.mk_fn_sig(iter::once(env_ty), + ret_ty, + false, + Unsafety::Normal, + Abi::Rust + ) + }) + } + _ => bug!("unexpected type {:?} in Instance::fn_sig_noadjust", ty) + } + } + + pub fn fn_sig(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> ty::PolyFnSig<'tcx> { + let mut fn_sig = self.fn_sig_noadjust(tcx); + if let InstanceDef::VtableShim(..) = self.def { + // Modify fn(self, ...) to fn(self: *mut Self, ...) + fn_sig = fn_sig.map_bound(|mut fn_sig| { + let mut inputs_and_output = fn_sig.inputs_and_output.to_vec(); + inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]); + fn_sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output); + fn_sig + }); + } + fn_sig + } } impl<'tcx> InstanceDef<'tcx> { @@ -63,6 +127,7 @@ impl<'tcx> InstanceDef<'tcx> { pub fn def_id(&self) -> DefId { match *self { InstanceDef::Item(def_id) | + InstanceDef::VtableShim(def_id) | InstanceDef::FnPtrShim(def_id, _) | InstanceDef::Virtual(def_id, _) | InstanceDef::Intrinsic(def_id, ) | @@ -108,10 +173,7 @@ impl<'tcx> InstanceDef<'tcx> { // available to normal end-users. 
return true } - let codegen_fn_attrs = tcx.codegen_fn_attrs(self.def_id()); - // need to use `is_const_fn_raw` since we don't really care if the user can use it as a - // const fn, just whether the function should be inlined - codegen_fn_attrs.requests_inline() || tcx.is_const_fn_raw(self.def_id()) + tcx.codegen_fn_attrs(self.def_id()).requests_inline() } } @@ -120,6 +182,9 @@ impl<'tcx> fmt::Display for Instance<'tcx> { ppaux::parameterized(f, self.substs, self.def_id(), &[])?; match self.def { InstanceDef::Item(_) => Ok(()), + InstanceDef::VtableShim(_) => { + write!(f, " - shim(vtable)") + } InstanceDef::Intrinsic(_) => { write!(f, " - intrinsic") } @@ -145,7 +210,7 @@ impl<'tcx> fmt::Display for Instance<'tcx> { impl<'a, 'b, 'tcx> Instance<'tcx> { pub fn new(def_id: DefId, substs: &'tcx Substs<'tcx>) -> Instance<'tcx> { - assert!(!substs.has_escaping_regions(), + assert!(!substs.has_escaping_bound_vars(), "substs of instance {:?} not normalized for codegen: {:?}", def_id, substs); Instance { def: InstanceDef::Item(def_id), substs: substs } @@ -230,6 +295,25 @@ impl<'a, 'b, 'tcx> Instance<'tcx> { result } + pub fn resolve_for_vtable(tcx: TyCtxt<'a, 'tcx, 'tcx>, + param_env: ty::ParamEnv<'tcx>, + def_id: DefId, + substs: &'tcx Substs<'tcx>) -> Option> { + debug!("resolve(def_id={:?}, substs={:?})", def_id, substs); + let fn_sig = tcx.fn_sig(def_id); + let is_vtable_shim = + fn_sig.inputs().skip_binder().len() > 0 && fn_sig.input(0).skip_binder().is_self(); + if is_vtable_shim { + debug!(" => associated item with unsizeable self: Self"); + Some(Instance { + def: InstanceDef::VtableShim(def_id), + substs, + }) + } else { + Instance::resolve(tcx, param_env, def_id, substs) + } + } + pub fn resolve_closure( tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId, @@ -244,6 +328,14 @@ impl<'a, 'b, 'tcx> Instance<'tcx> { _ => Instance::new(def_id, substs.substs) } } + + pub fn is_vtable_shim(&self) -> bool { + if let InstanceDef::VtableShim(..) = self.def { + true + } else { + false + } + } } fn resolve_associated_item<'a, 'tcx>( @@ -305,7 +397,9 @@ fn resolve_associated_item<'a, 'tcx>( None } } - traits::VtableAutoImpl(..) | traits::VtableParam(..) => None + traits::VtableAutoImpl(..) | + traits::VtableParam(..) | + traits::VtableTraitAlias(..) => None } } diff --git a/src/librustc/ty/item_path.rs b/src/librustc/ty/item_path.rs index 6d92890fc0..350e55288e 100644 --- a/src/librustc/ty/item_path.rs +++ b/src/librustc/ty/item_path.rs @@ -514,10 +514,12 @@ pub fn characteristic_def_id_of_type(ty: Ty<'_>) -> Option { ty::Str | ty::FnPtr(_) | ty::Projection(_) | + ty::Placeholder(..) | ty::UnnormalizedProjection(..) | ty::Param(_) | ty::Opaque(..) | ty::Infer(_) | + ty::Bound(..) | ty::Error | ty::GeneratorWitness(..) 
| ty::Never | diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index 05d4aeb6dd..5406495226 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -23,6 +23,7 @@ use std::mem; use std::ops::Bound; use ich::StableHashingContext; +use rustc_data_structures::indexed_vec::{IndexVec, Idx}; use rustc_data_structures::stable_hasher::{HashStable, StableHasher, StableHasherResult}; @@ -30,7 +31,7 @@ pub use rustc_target::abi::*; pub trait IntegerExt { fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx>; - fn from_attr(cx: C, ity: attr::IntType) -> Integer; + fn from_attr(cx: &C, ity: attr::IntType) -> Integer; fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>, repr: &ReprOptions, @@ -56,7 +57,7 @@ impl IntegerExt for Integer { } /// Get the Integer type from an attr::IntType. - fn from_attr(cx: C, ity: attr::IntType) -> Integer { + fn from_attr(cx: &C, ity: attr::IntType) -> Integer { let dl = cx.data_layout(); match ity { @@ -92,7 +93,7 @@ impl IntegerExt for Integer { let min_default = I8; if let Some(ity) = repr.int { - let discr = Integer::from_attr(tcx, ity); + let discr = Integer::from_attr(&tcx, ity); let fit = if ity.is_signed() { signed_fit } else { unsigned_fit }; if discr < fit { bug!("Integer::repr_discr: `#[repr]` hint too small for \ @@ -202,14 +203,13 @@ pub fn provide(providers: &mut ty::query::Providers<'_>) { }; } -#[derive(Copy, Clone)] pub struct LayoutCx<'tcx, C> { pub tcx: C, pub param_env: ty::ParamEnv<'tcx> } impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { - fn layout_raw_uncached(self, ty: Ty<'tcx>) + fn layout_raw_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> { let tcx = self.tcx; let param_env = self.param_env; @@ -226,11 +226,12 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { tcx.intern_layout(LayoutDetails::scalar(self, scalar_unit(value))) }; let scalar_pair = |a: Scalar, b: Scalar| { - let align = a.value.align(dl).max(b.value.align(dl)).max(dl.aggregate_align); - let b_offset = a.value.size(dl).abi_align(b.value.align(dl)); - let size = (b_offset + b.value.size(dl)).abi_align(align); + let b_align = b.value.align(dl); + let align = a.value.align(dl).max(b_align).max(dl.aggregate_align); + let b_offset = a.value.size(dl).align_to(b_align.abi); + let size = (b_offset + b.value.size(dl)).align_to(align.abi); LayoutDetails { - variants: Variants::Single { index: 0 }, + variants: Variants::Single { index: VariantIdx::new(0) }, fields: FieldPlacement::Arbitrary { offsets: vec![Size::ZERO, b_offset], memory_index: vec![0, 1] @@ -257,10 +258,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { bug!("struct cannot be packed and aligned"); } - let pack = { - let pack = repr.pack as u64; - Align::from_bytes(pack, pack).unwrap() - }; + let pack = Align::from_bytes(repr.pack as u64).unwrap(); let mut align = if packed { dl.i8_align @@ -274,7 +272,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { let mut optimize = !repr.inhibit_struct_field_reordering_opt(); if let StructKind::Prefixed(_, align) = kind { - optimize &= align.abi() == 1; + optimize &= align.bytes() == 1; } if optimize { @@ -285,7 +283,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { }; let optimizing = &mut inverse_memory_index[..end]; let field_align = |f: &TyLayout<'_>| { - if packed { f.align.min(pack).abi() } else { f.align.abi() } + if packed { f.align.abi.min(pack) } else { f.align.abi } }; match kind { StructKind::AlwaysSized | @@ -312,13 
+310,13 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { let mut offset = Size::ZERO; if let StructKind::Prefixed(prefix_size, prefix_align) = kind { - if packed { - let prefix_align = prefix_align.min(pack); - align = align.max(prefix_align); + let prefix_align = if packed { + prefix_align.min(pack) } else { - align = align.max(prefix_align); - } - offset = prefix_size.abi_align(prefix_align); + prefix_align + }; + align = align.max(AbiAndPrefAlign::new(prefix_align)); + offset = prefix_size.align_to(prefix_align); } for &i in &inverse_memory_index { @@ -333,15 +331,13 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { } // Invariant: offset < dl.obj_size_bound() <= 1<<61 - if packed { - let field_pack = field.align.min(pack); - offset = offset.abi_align(field_pack); - align = align.max(field_pack); - } - else { - offset = offset.abi_align(field.align); - align = align.max(field.align); - } + let field_align = if packed { + field.align.min(AbiAndPrefAlign::new(pack)) + } else { + field.align + }; + offset = offset.align_to(field_align.abi); + align = align.max(field_align); debug!("univariant offset: {:?} field: {:#?}", offset, field); offsets[i as usize] = offset; @@ -352,7 +348,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { if repr.align > 0 { let repr_align = repr.align as u64; - align = align.max(Align::from_bytes(repr_align, repr_align).unwrap()); + align = align.max(AbiAndPrefAlign::new(Align::from_bytes(repr_align).unwrap())); debug!("univariant repr_align: {:?}", repr_align); } @@ -377,7 +373,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { memory_index = inverse_memory_index; } - let size = min_size.abi_align(align); + let size = min_size.align_to(align.abi); let mut abi = Abi::Aggregate { sized }; // Unpack newtype ABIs and find scalar pairs. @@ -394,7 +390,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { (Some((i, field)), None, None) => { // Field fills the struct and it has a scalar or scalar pair ABI. if offsets[i].bytes() == 0 && - align.abi() == field.align.abi() && + align.abi == field.align.abi && size == field.size { match field.abi { // For plain scalars, or vectors of them, we can't unpack @@ -455,7 +451,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { } Ok(LayoutDetails { - variants: Variants::Single { index: 0 }, + variants: Variants::Single { index: VariantIdx::new(0) }, fields: FieldPlacement::Arbitrary { offsets, memory_index @@ -500,7 +496,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { // The never type. 
ty::Never => { tcx.intern_layout(LayoutDetails { - variants: Variants::Single { index: 0 }, + variants: Variants::Single { index: VariantIdx::new(0) }, fields: FieldPlacement::Union(0), abi: Abi::Uninhabited, align: dl.i8_align, @@ -556,7 +552,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { .ok_or(LayoutError::SizeOverflow(ty))?; tcx.intern_layout(LayoutDetails { - variants: Variants::Single { index: 0 }, + variants: Variants::Single { index: VariantIdx::new(0) }, fields: FieldPlacement::Array { stride: element.size, count @@ -569,7 +565,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { ty::Slice(element) => { let element = self.layout_of(element)?; tcx.intern_layout(LayoutDetails { - variants: Variants::Single { index: 0 }, + variants: Variants::Single { index: VariantIdx::new(0) }, fields: FieldPlacement::Array { stride: element.size, count: 0 @@ -581,7 +577,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { } ty::Str => { tcx.intern_layout(LayoutDetails { - variants: Variants::Single { index: 0 }, + variants: Variants::Single { index: VariantIdx::new(0) }, fields: FieldPlacement::Array { stride: Size::from_bytes(1), count: 0 @@ -648,10 +644,10 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { let size = element.size.checked_mul(count, dl) .ok_or(LayoutError::SizeOverflow(ty))?; let align = dl.vector_align(size); - let size = size.abi_align(align); + let size = size.align_to(align.abi); tcx.intern_layout(LayoutDetails { - variants: Variants::Single { index: 0 }, + variants: Variants::Single { index: VariantIdx::new(0) }, fields: FieldPlacement::Array { stride: element.size, count @@ -672,7 +668,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { v.fields.iter().map(|field| { self.layout_of(field.ty(tcx, substs)) }).collect::, _>>() - }).collect::, _>>()?; + }).collect::, _>>()?; if def.is_union() { let packed = def.repr.packed(); @@ -680,10 +676,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { bug!("Union cannot be packed and aligned"); } - let pack = { - let pack = def.repr.pack as u64; - Align::from_bytes(pack, pack).unwrap() - }; + let pack = Align::from_bytes(def.repr.pack as u64).unwrap(); let mut align = if packed { dl.i8_align @@ -694,28 +687,62 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { if def.repr.align > 0 { let repr_align = def.repr.align as u64; align = align.max( - Align::from_bytes(repr_align, repr_align).unwrap()); + AbiAndPrefAlign::new(Align::from_bytes(repr_align).unwrap())); } + let optimize = !def.repr.inhibit_union_abi_opt(); let mut size = Size::ZERO; - for field in &variants[0] { + let mut abi = Abi::Aggregate { sized: true }; + let index = VariantIdx::new(0); + for field in &variants[index] { assert!(!field.is_unsized()); - if packed { - let field_pack = field.align.min(pack); - align = align.max(field_pack); + let field_align = if packed { + field.align.min(AbiAndPrefAlign::new(pack)) } else { - align = align.max(field.align); + field.align + }; + align = align.max(field_align); + + // If all non-ZST fields have the same ABI, forward this ABI + if optimize && !field.is_zst() { + // Normalize scalar_unit to the maximal valid range + let field_abi = match &field.abi { + Abi::Scalar(x) => Abi::Scalar(scalar_unit(x.value)), + Abi::ScalarPair(x, y) => { + Abi::ScalarPair( + scalar_unit(x.value), + scalar_unit(y.value), + ) + } + Abi::Vector { element: x, count } => { + Abi::Vector { + element: scalar_unit(x.value), + count: *count, + } + } + Abi::Uninhabited | + Abi::Aggregate { .. 
} => Abi::Aggregate { sized: true }, + }; + + if size == Size::ZERO { + // first non ZST: initialize 'abi' + abi = field_abi; + } else if abi != field_abi { + // different fields have different ABI: reset to Aggregate + abi = Abi::Aggregate { sized: true }; + } } + size = cmp::max(size, field.size); } return Ok(tcx.intern_layout(LayoutDetails { - variants: Variants::Single { index: 0 }, - fields: FieldPlacement::Union(variants[0].len()), - abi: Abi::Aggregate { sized: true }, + variants: Variants::Single { index }, + fields: FieldPlacement::Union(variants[index].len()), + abi, align, - size: size.abi_align(align) + size: size.align_to(align.abi) })); } @@ -730,8 +757,12 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { uninhabited && is_zst }; let (present_first, present_second) = { - let mut present_variants = (0..variants.len()).filter(|&v| { - !absent(&variants[v]) + let mut present_variants = variants.iter_enumerated().filter_map(|(i, v)| { + if absent(v) { + None + } else { + Some(i) + } }); (present_variants.next(), present_variants.next()) }; @@ -793,16 +824,16 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { // The current code for niche-filling relies on variant indices // instead of actual discriminants, so dataful enums with // explicit discriminants (RFC #2363) would misbehave. - let no_explicit_discriminants = def.variants.iter().enumerate() - .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i)); + let no_explicit_discriminants = def.variants.iter_enumerated() + .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32())); // Niche-filling enum optimization. if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants { let mut dataful_variant = None; - let mut niche_variants = usize::max_value()..=0; + let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0); // Find one non-ZST variant. - 'variants: for (v, fields) in variants.iter().enumerate() { + 'variants: for (v, fields) in variants.iter_enumerated() { if absent(fields) { continue 'variants; } @@ -825,7 +856,9 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { } if let Some(i) = dataful_variant { - let count = (niche_variants.end() - niche_variants.start() + 1) as u128; + let count = ( + niche_variants.end().as_u32() - niche_variants.start().as_u32() + 1 + ) as u128; for (field_index, &field) in variants[i].iter().enumerate() { let niche = match self.find_niche(field)? 
{ Some(niche) => niche, @@ -837,7 +870,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { }; let mut align = dl.aggregate_align; - let st = variants.iter().enumerate().map(|(j, v)| { + let st = variants.iter_enumerated().map(|(j, v)| { let mut st = univariant_uninterned(v, &def.repr, StructKind::AlwaysSized)?; st.variants = Variants::Single { index: j }; @@ -845,7 +878,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { align = align.max(st.align); Ok(st) - }).collect::, _>>()?; + }).collect::, _>>()?; let offset = st[i].fields.offset(field_index) + niche.offset; let size = st[i].size; @@ -899,8 +932,8 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { let (mut min, mut max) = (i128::max_value(), i128::min_value()); let discr_type = def.repr.discr_type(); - let bits = Integer::from_attr(tcx, discr_type).size().bits(); - for (i, discr) in def.discriminants(tcx).enumerate() { + let bits = Integer::from_attr(self, discr_type).size().bits(); + for (i, discr) in def.discriminants(tcx) { if variants[i].iter().any(|f| f.abi.is_uninhabited()) { continue; } @@ -924,43 +957,43 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { let mut size = Size::ZERO; // We're interested in the smallest alignment, so start large. - let mut start_align = Align::from_bytes(256, 256).unwrap(); - assert_eq!(Integer::for_abi_align(dl, start_align), None); + let mut start_align = Align::from_bytes(256).unwrap(); + assert_eq!(Integer::for_align(dl, start_align), None); // repr(C) on an enum tells us to make a (tag, union) layout, // so we need to grow the prefix alignment to be at least // the alignment of the union. (This value is used both for // determining the alignment of the overall enum, and the // determining the alignment of the payload after the tag.) - let mut prefix_align = min_ity.align(dl); + let mut prefix_align = min_ity.align(dl).abi; if def.repr.c() { for fields in &variants { for field in fields { - prefix_align = prefix_align.max(field.align); + prefix_align = prefix_align.max(field.align.abi); } } } // Create the set of structs that represent each variant. - let mut layout_variants = variants.iter().enumerate().map(|(i, field_layouts)| { + let mut layout_variants = variants.iter_enumerated().map(|(i, field_layouts)| { let mut st = univariant_uninterned(&field_layouts, &def.repr, StructKind::Prefixed(min_ity.size(), prefix_align))?; st.variants = Variants::Single { index: i }; // Find the first field we can't move later // to make room for a larger discriminant. for field in st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) { - if !field.is_zst() || field.align.abi() != 1 { - start_align = start_align.min(field.align); + if !field.is_zst() || field.align.abi.bytes() != 1 { + start_align = start_align.min(field.align.abi); break; } } size = cmp::max(size, st.size); align = align.max(st.align); Ok(st) - }).collect::, _>>()?; + }).collect::, _>>()?; // Align the maximum variant size to the largest alignment. 
- size = size.abi_align(align); + size = size.align_to(align.abi); if size.bytes() >= dl.obj_size_bound() { return Err(LayoutError::SizeOverflow(ty)); @@ -996,7 +1029,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { let mut ity = if def.repr.c() || def.repr.int.is_some() { min_ity } else { - Integer::for_abi_align(dl, start_align).unwrap_or(min_ity) + Integer::for_align(dl, start_align).unwrap_or(min_ity) }; // If the alignment is not larger than the chosen discriminant size, @@ -1124,9 +1157,15 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { } tcx.layout_raw(param_env.and(normalized))? } - ty::UnnormalizedProjection(..) | ty::GeneratorWitness(..) | ty::Infer(_) => { + + ty::Bound(..) | + ty::Placeholder(..) | + ty::UnnormalizedProjection(..) | + ty::GeneratorWitness(..) | + ty::Infer(_) => { bug!("LayoutDetails::compute: unexpected type `{}`", ty) } + ty::Param(_) | ty::Error => { return Err(LayoutError::Unknown(ty)); } @@ -1136,7 +1175,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { /// This is invoked by the `layout_raw` query to record the final /// layout of each type. #[inline] - fn record_layout_for_printing(self, layout: TyLayout<'tcx>) { + fn record_layout_for_printing(&self, layout: TyLayout<'tcx>) { // If we are running with `-Zprint-type-sizes`, record layouts for // dumping later. Ignore layouts that are done with non-empty // environments or non-monomorphic layouts, as the user only wants @@ -1153,13 +1192,13 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { self.record_layout_for_printing_outlined(layout) } - fn record_layout_for_printing_outlined(self, layout: TyLayout<'tcx>) { + fn record_layout_for_printing_outlined(&self, layout: TyLayout<'tcx>) { // (delay format until we actually need it) let record = |kind, packed, opt_discr_size, variants| { let type_desc = format!("{:?}", layout.ty); self.tcx.sess.code_stats.borrow_mut().record_type_size(kind, type_desc, - layout.align, + layout.align.abi, layout.size, packed, opt_discr_size, @@ -1206,7 +1245,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { name: name.to_string(), offset: offset.bytes(), size: field_layout.size.bytes(), - align: field_layout.align.abi(), + align: field_layout.align.abi.bytes(), } } } @@ -1219,7 +1258,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { } else { session::SizeKind::Exact }, - align: layout.align.abi(), + align: layout.align.abi.bytes(), size: if min_size.bytes() == 0 { layout.size.bytes() } else { @@ -1255,7 +1294,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { debug!("print-type-size `{:#?}` adt general variants def {}", layout.ty, adt_def.variants.len()); let variant_infos: Vec<_> = - adt_def.variants.iter().enumerate().map(|(i, variant_def)| { + adt_def.variants.iter_enumerated().map(|(i, variant_def)| { let fields: Vec<_> = variant_def.fields.iter().map(|f| f.ident.name).collect(); build_variant_info(Some(variant_def.name), @@ -1275,7 +1314,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { /// Type size "skeleton", i.e. the only information determining a type's size. /// While this is conservative, (aside from constant sizes, only pointers, /// newtypes thereof and null pointer optimized enums are allowed), it is -/// enough to statically check common usecases of transmute. +/// enough to statically check common use cases of transmute. #[derive(Copy, Clone, Debug)] pub enum SizeSkeleton<'tcx> { /// Any statically computable Layout. 
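// Illustrative aside (helper names assumed; layouts of non-#[repr(C)] types
// are not a stable guarantee): the layout hunks above rename `abi_align` to
// `align_to`, i.e. "round this offset up to that alignment", and the
// niche-filling code keeps exploiting forbidden values such as the null
// pointer to store an enum tag for free. Both effects can be checked with
// plain `std::mem`:

use std::mem::{align_of, size_of};

/// Round `offset` up to the next multiple of `align` (a power of two),
/// the same arithmetic as `Size::align_to`.
fn align_to(offset: usize, align: usize) -> usize {
    (offset + align - 1) & !(align - 1)
}

fn main() {
    #[repr(C)]
    struct Pair {
        a: u8,
        b: u32,
    }

    // Lay out (u8, u32) C-style by hand: the u32 field starts at the next
    // suitably aligned offset, and the struct size is rounded up to its
    // alignment.
    let mut offset = size_of::<u8>();
    offset = align_to(offset, align_of::<u32>());
    offset += size_of::<u32>();
    let size = align_to(offset, align_of::<u32>());
    assert_eq!(size, size_of::<Pair>()); // matches the compiler's #[repr(C)] layout

    // Niche-filling: `&u8` is never null, so `Option<&u8>` reuses the null
    // value as its `None` tag and stays pointer-sized.
    assert_eq!(size_of::<Option<&u8>>(), size_of::<&u8>());
}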
@@ -1335,7 +1374,8 @@ impl<'a, 'tcx> SizeSkeleton<'tcx> { } // Get a zero-sized variant or a pointer newtype. - let zero_or_ptr_variant = |i: usize| { + let zero_or_ptr_variant = |i| { + let i = VariantIdx::new(i); let fields = def.variants[i].fields.iter().map(|field| { SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env) }); @@ -1473,7 +1513,7 @@ impl<'a, 'tcx> LayoutOf for LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { /// Computes the layout of a type. Note that this implicitly /// executes in "reveal all" mode. - fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout { + fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout { let param_env = self.param_env.with_reveal_all(); let ty = self.tcx.normalize_erasing_regions(param_env, ty); let details = self.tcx.layout_raw(param_env.and(ty))?; @@ -1500,7 +1540,7 @@ impl<'a, 'tcx> LayoutOf for LayoutCx<'tcx, ty::query::TyCtxtAt<'a, 'tcx, 'tcx>> /// Computes the layout of a type. Note that this implicitly /// executes in "reveal all" mode. - fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout { + fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout { let param_env = self.param_env.with_reveal_all(); let ty = self.tcx.normalize_erasing_regions(param_env, ty); let details = self.tcx.layout_raw(param_env.and(ty))?; @@ -1558,7 +1598,7 @@ impl<'a, 'tcx, C> TyLayoutMethods<'tcx, C> for Ty<'tcx> where C: LayoutOf> + HasTyCtxt<'tcx>, C::TyLayout: MaybeResult> { - fn for_variant(this: TyLayout<'tcx>, cx: C, variant_index: usize) -> TyLayout<'tcx> { + fn for_variant(this: TyLayout<'tcx>, cx: &C, variant_index: VariantIdx) -> TyLayout<'tcx> { let details = match this.variants { Variants::Single { index } if index == variant_index => this.details, @@ -1597,7 +1637,7 @@ impl<'a, 'tcx, C> TyLayoutMethods<'tcx, C> for Ty<'tcx> } } - fn field(this: TyLayout<'tcx>, cx: C, i: usize) -> C::TyLayout { + fn field(this: TyLayout<'tcx>, cx: &C, i: usize) -> C::TyLayout { let tcx = cx.tcx(); cx.layout_of(match this.ty.sty { ty::Bool | @@ -1694,7 +1734,7 @@ impl<'a, 'tcx, C> TyLayoutMethods<'tcx, C> for Ty<'tcx> Variants::Tagged { tag: ref discr, .. } | Variants::NicheFilling { niche: ref discr, .. } => { assert_eq!(i, 0); - let layout = LayoutDetails::scalar(tcx, discr.clone()); + let layout = LayoutDetails::scalar(cx, discr.clone()); return MaybeResult::from_ok(TyLayout { details: tcx.intern_layout(layout), ty: discr.value.to_ty(tcx) @@ -1703,8 +1743,9 @@ impl<'a, 'tcx, C> TyLayoutMethods<'tcx, C> for Ty<'tcx> } } - ty::Projection(_) | ty::UnnormalizedProjection(..) | - ty::Opaque(..) | ty::Param(_) | ty::Infer(_) | ty::Error => { + ty::Projection(_) | ty::UnnormalizedProjection(..) | ty::Bound(..) | + ty::Placeholder(..) | ty::Opaque(..) | ty::Param(_) | ty::Infer(_) | + ty::Error => { bug!("TyLayout::field_type: unexpected type `{}`", this.ty) } }) @@ -1720,7 +1761,7 @@ struct Niche { impl Niche { fn reserve<'a, 'tcx>( &self, - cx: LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>>, + cx: &LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>>, count: u128, ) -> Option<(u128, Scalar)> { if count > self.available { @@ -1740,7 +1781,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { /// Find the offset of a niche leaf field, starting from /// the given type and recursing through aggregates. // FIXME(eddyb) traverse already optimized enums. 
- fn find_niche(self, layout: TyLayout<'tcx>) -> Result, LayoutError<'tcx>> { + fn find_niche(&self, layout: TyLayout<'tcx>) -> Result, LayoutError<'tcx>> { let scalar_niche = |scalar: &Scalar, offset| { let Scalar { value, valid_range: ref v } = *scalar; @@ -1777,7 +1818,9 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { Abi::ScalarPair(ref a, ref b) => { // HACK(nox): We iter on `b` and then `a` because `max_by_key` // returns the last maximum. - let niche = iter::once((b, a.value.size(self).abi_align(b.value.align(self)))) + let niche = iter::once( + (b, a.value.size(self).align_to(b.value.align(self).abi)) + ) .chain(iter::once((a, Size::ZERO))) .filter_map(|(scalar, offset)| scalar_niche(scalar, offset)) .max_by_key(|niche| niche.available); @@ -1878,6 +1921,16 @@ impl<'a> HashStable> for FieldPlacement { } } +impl<'a> HashStable> for VariantIdx { + fn hash_stable( + &self, + hcx: &mut StableHashingContext<'a>, + hasher: &mut StableHasher, + ) { + self.as_u32().hash_stable(hcx, hasher) + } +} + impl<'a> HashStable> for Abi { fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, @@ -1938,12 +1991,16 @@ impl_stable_hash_for!(enum ::ty::layout::Primitive { Pointer }); +impl_stable_hash_for!(struct ::ty::layout::AbiAndPrefAlign { + abi, + pref +}); + impl<'gcx> HashStable> for Align { fn hash_stable(&self, hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { - self.abi().hash_stable(hcx, hasher); - self.pref().hash_stable(hcx, hasher); + self.bytes().hash_stable(hcx, hasher); } } diff --git a/src/librustc/ty/mod.rs b/src/librustc/ty/mod.rs index 5a2f062f23..4633ab1166 100644 --- a/src/librustc/ty/mod.rs +++ b/src/librustc/ty/mod.rs @@ -27,11 +27,12 @@ use middle::lang_items::{FnTraitLangItem, FnMutTraitLangItem, FnOnceTraitLangIte use middle::privacy::AccessLevels; use middle::resolve_lifetime::ObjectLifetimeDefault; use mir::Mir; -use mir::interpret::GlobalId; +use mir::interpret::{GlobalId, ErrorHandled}; use mir::GeneratorLayout; use session::CrateDisambiguator; use traits::{self, Reveal}; use ty; +use ty::layout::VariantIdx; use ty::subst::{Subst, Substs}; use ty::util::{IntTypeExt, Discr}; use ty::walk::TypeWalker; @@ -48,7 +49,6 @@ use std::hash::{Hash, Hasher}; use std::ops::Deref; use rustc_data_structures::sync::{self, Lrc, ParallelIterator, par_iter}; use std::slice; -use std::vec::IntoIter; use std::{mem, ptr}; use syntax::ast::{self, DUMMY_NODE_ID, Name, Ident, NodeId}; use syntax::attr; @@ -57,13 +57,13 @@ use syntax::symbol::{keywords, Symbol, LocalInternedString, InternedString}; use syntax_pos::{DUMMY_SP, Span}; use smallvec; -use rustc_data_structures::indexed_vec::Idx; +use rustc_data_structures::indexed_vec::{Idx, IndexVec}; use rustc_data_structures::stable_hasher::{StableHasher, StableHasherResult, HashStable}; use hir; -pub use self::sty::{Binder, BoundTy, BoundTyIndex, DebruijnIndex, INNERMOST}; +pub use self::sty::{Binder, BoundTy, BoundTyKind, BoundVar, DebruijnIndex, INNERMOST}; pub use self::sty::{FnSig, GenSig, CanonicalPolyFnSig, PolyFnSig, PolyGenSig}; pub use self::sty::{InferTy, ParamTy, ProjectionTy, ExistentialPredicate}; pub use self::sty::{ClosureSubsts, GeneratorSubsts, UpvarSubsts, TypeAndMut}; @@ -294,7 +294,7 @@ impl Visibility { } } - /// Returns true if an item with this visibility is accessible from the given block. + /// Returns `true` if an item with this visibility is accessible from the given block. 
pub fn is_accessible_from(self, module: DefId, tree: T) -> bool { let restriction = match self { // Public items are visible everywhere. @@ -309,7 +309,7 @@ impl Visibility { tree.is_descendant_of(module, restriction) } - /// Returns true if this visibility is at least as accessible as the given visibility + /// Returns `true` if this visibility is at least as accessible as the given visibility pub fn is_at_least(self, vis: Visibility, tree: T) -> bool { let vis_restriction = match vis { Visibility::Public => return self == Visibility::Public, @@ -320,7 +320,7 @@ impl Visibility { self.is_accessible_from(vis_restriction, tree) } - // Returns true if this item is visible anywhere in the local crate. + // Returns `true` if this item is visible anywhere in the local crate. pub fn is_visible_locally(self) -> bool { match self { Visibility::Public => true, @@ -432,7 +432,7 @@ bitflags! { const HAS_SELF = 1 << 1; const HAS_TY_INFER = 1 << 2; const HAS_RE_INFER = 1 << 3; - const HAS_RE_SKOL = 1 << 4; + const HAS_RE_PLACEHOLDER = 1 << 4; /// Does this have any `ReEarlyBound` regions? Used to /// determine whether substitition is required, since those @@ -451,7 +451,7 @@ bitflags! { // FIXME: Rename this to the actual property since it's used for generators too const HAS_TY_CLOSURE = 1 << 9; - // true if there are "names" of types and regions and so forth + // `true` if there are "names" of types and regions and so forth // that are local to a particular fn const HAS_FREE_LOCAL_NAMES = 1 << 10; @@ -463,13 +463,11 @@ bitflags! { // Currently we can't normalize projections w/ bound regions. const HAS_NORMALIZABLE_PROJECTION = 1 << 12; - // Set if this includes a "canonical" type or region var -- - // ought to be true only for the results of canonicalization. - const HAS_CANONICAL_VARS = 1 << 13; - /// Does this have any `ReLateBound` regions? Used to check /// if a global bound is safe to evaluate. - const HAS_RE_LATE_BOUND = 1 << 14; + const HAS_RE_LATE_BOUND = 1 << 13; + + const HAS_TY_PLACEHOLDER = 1 << 14; const NEEDS_SUBST = TypeFlags::HAS_PARAMS.bits | TypeFlags::HAS_SELF.bits | @@ -482,7 +480,7 @@ bitflags! { TypeFlags::HAS_SELF.bits | TypeFlags::HAS_TY_INFER.bits | TypeFlags::HAS_RE_INFER.bits | - TypeFlags::HAS_RE_SKOL.bits | + TypeFlags::HAS_RE_PLACEHOLDER.bits | TypeFlags::HAS_RE_EARLY_BOUND.bits | TypeFlags::HAS_FREE_REGIONS.bits | TypeFlags::HAS_TY_ERR.bits | @@ -490,8 +488,8 @@ bitflags! { TypeFlags::HAS_TY_CLOSURE.bits | TypeFlags::HAS_FREE_LOCAL_NAMES.bits | TypeFlags::KEEP_IN_LOCAL_TCX.bits | - TypeFlags::HAS_CANONICAL_VARS.bits | - TypeFlags::HAS_RE_LATE_BOUND.bits; + TypeFlags::HAS_RE_LATE_BOUND.bits | + TypeFlags::HAS_TY_PLACEHOLDER.bits; } } @@ -519,6 +517,10 @@ pub struct TyS<'tcx> { outer_exclusive_binder: ty::DebruijnIndex, } +// `TyS` is used a lot. Make sure it doesn't unintentionally get bigger. 
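// ---------------------------------------------------------------------------
// [Editor's aside — illustrative sketch only, not part of this patch.]
// The `static_assert!` immediately below pins the size of `TyS` so a hot,
// heavily-interned type cannot grow by accident. A rough standalone
// equivalent in present-day Rust (the `Interned` type and the 16-byte figure
// are invented for this example) would be:

struct Interned {
    kind: u64,
    flags: u64,
}

// Compilation fails if `Interned` ever stops being exactly 16 bytes.
const _: () = assert!(std::mem::size_of::<Interned>() == 16);
// ---------------------------------------------------------------------------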
+#[cfg(target_arch = "x86_64")] +static_assert!(MEM_SIZE_OF_TY_S: ::std::mem::size_of::>() == 32); + impl<'tcx> Ord for TyS<'tcx> { fn cmp(&self, other: &TyS<'tcx>) -> Ordering { self.sty.cmp(&other.sty) @@ -549,14 +551,14 @@ impl<'tcx> TyS<'tcx> { pub fn is_primitive_ty(&self) -> bool { match self.sty { TyKind::Bool | - TyKind::Char | - TyKind::Int(_) | - TyKind::Uint(_) | - TyKind::Float(_) | - TyKind::Infer(InferTy::IntVar(_)) | - TyKind::Infer(InferTy::FloatVar(_)) | - TyKind::Infer(InferTy::FreshIntTy(_)) | - TyKind::Infer(InferTy::FreshFloatTy(_)) => true, + TyKind::Char | + TyKind::Int(_) | + TyKind::Uint(_) | + TyKind::Float(_) | + TyKind::Infer(InferTy::IntVar(_)) | + TyKind::Infer(InferTy::FloatVar(_)) | + TyKind::Infer(InferTy::FreshIntTy(_)) | + TyKind::Infer(InferTy::FreshFloatTy(_)) => true, TyKind::Ref(_, x, _) => x.is_primitive_ty(), _ => false, } @@ -731,12 +733,17 @@ impl List { } } +#[derive(Clone, Copy, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] +pub struct UpvarPath { + pub hir_id: hir::HirId, +} + /// Upvars do not get their own node-id. Instead, we use the pair of /// the original var id (that is, the root variable that is referenced /// by the upvar) and the id of the closure expression. #[derive(Clone, Copy, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] pub struct UpvarId { - pub var_id: hir::HirId, + pub var_path: UpvarPath, pub closure_expr_id: LocalDefId, } @@ -958,7 +965,7 @@ impl<'a, 'gcx, 'tcx> Generics { _ => bug!("expected lifetime parameter, but found another generic parameter") } } else { - tcx.generics_of(self.parent.expect("parent_count>0 but no parent?")) + tcx.generics_of(self.parent.expect("parent_count > 0 but no parent?")) .region_param(param, tcx) } } @@ -975,7 +982,7 @@ impl<'a, 'gcx, 'tcx> Generics { _ => bug!("expected type parameter, but found another generic parameter") } } else { - tcx.generics_of(self.parent.expect("parent_count>0 but no parent?")) + tcx.generics_of(self.parent.expect("parent_count > 0 but no parent?")) .type_param(param, tcx) } } @@ -998,6 +1005,7 @@ impl<'a, 'gcx, 'tcx> GenericPredicates<'tcx> { self.instantiate_into(tcx, &mut instantiated, substs); instantiated } + pub fn instantiate_own(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, substs: &Substs<'tcx>) -> InstantiatedPredicates<'tcx> { InstantiatedPredicates { @@ -1046,29 +1054,29 @@ impl<'a, 'gcx, 'tcx> GenericPredicates<'tcx> { #[derive(Clone, Copy, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] pub enum Predicate<'tcx> { - /// Corresponds to `where Foo : Bar`. `Foo` here would be + /// Corresponds to `where Foo: Bar`. `Foo` here would be /// the `Self` type of the trait reference and `A`, `B`, and `C` /// would be the type parameters. Trait(PolyTraitPredicate<'tcx>), - /// where 'a : 'b + /// where `'a: 'b` RegionOutlives(PolyRegionOutlivesPredicate<'tcx>), - /// where T : 'a + /// where `T: 'a` TypeOutlives(PolyTypeOutlivesPredicate<'tcx>), - /// where ::Name == X, approximately. - /// See `ProjectionPredicate` struct for details. + /// where `::Name == X`, approximately. + /// See the `ProjectionPredicate` struct for details. Projection(PolyProjectionPredicate<'tcx>), - /// no syntax: T WF + /// no syntax: `T` well-formed WellFormed(Ty<'tcx>), /// trait must be object-safe ObjectSafe(DefId), - /// No direct syntax. May be thought of as `where T : FnFoo<...>` - /// for some substitutions `...` and T being a closure type. + /// No direct syntax. 
May be thought of as `where T: FnFoo<...>` + /// for some substitutions `...` and `T` being a closure type. /// Satisfied (or refuted) once we know the closure's kind. ClosureKind(DefId, ClosureSubsts<'tcx>, ClosureKind), @@ -1116,11 +1124,11 @@ impl<'a, 'gcx, 'tcx> Predicate<'tcx> { // // Let's start with an easy case. Consider two traits: // - // trait Foo<'a> : Bar<'a,'a> { } + // trait Foo<'a>: Bar<'a,'a> { } // trait Bar<'b,'c> { } // - // Now, if we have a trait reference `for<'x> T : Foo<'x>`, then - // we can deduce that `for<'x> T : Bar<'x,'x>`. Basically, if we + // Now, if we have a trait reference `for<'x> T: Foo<'x>`, then + // we can deduce that `for<'x> T: Bar<'x,'x>`. Basically, if we // knew that `Foo<'x>` (for any 'x) then we also know that // `Bar<'x,'x>` (for any 'x). This more-or-less falls out from // normal substitution. @@ -1133,21 +1141,21 @@ impl<'a, 'gcx, 'tcx> Predicate<'tcx> { // // Another example to be careful of is this: // - // trait Foo1<'a> : for<'b> Bar1<'a,'b> { } + // trait Foo1<'a>: for<'b> Bar1<'a,'b> { } // trait Bar1<'b,'c> { } // - // Here, if we have `for<'x> T : Foo1<'x>`, then what do we know? - // The answer is that we know `for<'x,'b> T : Bar1<'x,'b>`. The + // Here, if we have `for<'x> T: Foo1<'x>`, then what do we know? + // The answer is that we know `for<'x,'b> T: Bar1<'x,'b>`. The // reason is similar to the previous example: any impl of - // `T:Foo1<'x>` must show that `for<'b> T : Bar1<'x, 'b>`. So + // `T:Foo1<'x>` must show that `for<'b> T: Bar1<'x, 'b>`. So // basically we would want to collapse the bound lifetimes from // the input (`trait_ref`) and the supertraits. // // To achieve this in practice is fairly straightforward. Let's // consider the more complicated scenario: // - // - We start out with `for<'x> T : Foo1<'x>`. In this case, `'x` - // has a De Bruijn index of 1. We want to produce `for<'x,'b> T : Bar1<'x,'b>`, + // - We start out with `for<'x> T: Foo1<'x>`. In this case, `'x` + // has a De Bruijn index of 1. We want to produce `for<'x,'b> T: Bar1<'x,'b>`, // where both `'x` and `'b` would have a DB index of 1. // The substitution from the input trait-ref is therefore going to be // `'a => 'x` (where `'x` has a DB index of 1). @@ -1199,6 +1207,7 @@ impl<'a, 'gcx, 'tcx> Predicate<'tcx> { pub struct TraitPredicate<'tcx> { pub trait_ref: TraitRef<'tcx> } + pub type PolyTraitPredicate<'tcx> = ty::Binder>; impl<'tcx> TraitPredicate<'tcx> { @@ -1223,7 +1232,7 @@ impl<'tcx> PolyTraitPredicate<'tcx> { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)] -pub struct OutlivesPredicate(pub A, pub B); // `A : B` +pub struct OutlivesPredicate(pub A, pub B); // `A: B` pub type PolyOutlivesPredicate = ty::Binder>; pub type RegionOutlivesPredicate<'tcx> = OutlivesPredicate, ty::Region<'tcx>>; @@ -1243,11 +1252,11 @@ pub type PolySubtypePredicate<'tcx> = ty::Binder>; /// This kind of predicate has no *direct* correspondent in the /// syntax, but it roughly corresponds to the syntactic forms: /// -/// 1. `T : TraitRef<..., Item=Type>` +/// 1. `T: TraitRef<..., Item=Type>` /// 2. `>::Item == Type` (NYI) /// /// In particular, form #1 is "desugared" to the combination of a -/// normal trait predicate (`T : TraitRef<...>`) and one of these +/// normal trait predicate (`T: TraitRef<...>`) and one of these /// predicates. Form #2 is a broader form in that it also permits /// equality between arbitrary types. 
Processing an instance of /// Form #2 eventually yields one of these `ProjectionPredicate` @@ -1261,14 +1270,15 @@ pub struct ProjectionPredicate<'tcx> { pub type PolyProjectionPredicate<'tcx> = Binder>; impl<'tcx> PolyProjectionPredicate<'tcx> { - /// Returns the def-id of the associated item being projected. + /// Returns the `DefId` of the associated item being projected. pub fn item_def_id(&self) -> DefId { self.skip_binder().projection_ty.item_def_id } + #[inline] pub fn to_poly_trait_ref(&self, tcx: TyCtxt<'_, '_, '_>) -> PolyTraitRef<'tcx> { - // Note: unlike with TraitRef::to_poly_trait_ref(), - // self.0.trait_ref is permitted to have escaping regions. + // Note: unlike with `TraitRef::to_poly_trait_ref()`, + // `self.0.trait_ref` is permitted to have escaping regions. // This is because here `self` has a `Binder` and so does our // return value, so we are preserving the number of binding // levels. @@ -1279,12 +1289,12 @@ impl<'tcx> PolyProjectionPredicate<'tcx> { self.map_bound(|predicate| predicate.ty) } - /// The DefId of the TraitItem for the associated type. + /// The `DefId` of the `TraitItem` for the associated type. /// - /// Note that this is not the DefId of the TraitRef containing this - /// associated type, which is in tcx.associated_item(projection_def_id()).container. + /// Note that this is not the `DefId` of the `TraitRef` containing this + /// associated type, which is in `tcx.associated_item(projection_def_id()).container`. pub fn projection_def_id(&self) -> DefId { - // ok to skip binder since trait def-id does not care about regions + // okay to skip binder since trait def-id does not care about regions self.skip_binder().projection_ty.item_def_id } } @@ -1341,49 +1351,88 @@ impl<'tcx> ToPredicate<'tcx> for PolyProjectionPredicate<'tcx> { } } +// A custom iterator used by Predicate::walk_tys. +enum WalkTysIter<'tcx, I, J, K> + where I: Iterator>, + J: Iterator>, + K: Iterator> +{ + None, + One(Ty<'tcx>), + Two(Ty<'tcx>, Ty<'tcx>), + Types(I), + InputTypes(J), + ProjectionTypes(K) +} + +impl<'tcx, I, J, K> Iterator for WalkTysIter<'tcx, I, J, K> + where I: Iterator>, + J: Iterator>, + K: Iterator> +{ + type Item = Ty<'tcx>; + + fn next(&mut self) -> Option> { + match *self { + WalkTysIter::None => None, + WalkTysIter::One(item) => { + *self = WalkTysIter::None; + Some(item) + }, + WalkTysIter::Two(item1, item2) => { + *self = WalkTysIter::One(item2); + Some(item1) + }, + WalkTysIter::Types(ref mut iter) => { + iter.next() + }, + WalkTysIter::InputTypes(ref mut iter) => { + iter.next() + }, + WalkTysIter::ProjectionTypes(ref mut iter) => { + iter.next() + } + } + } +} + impl<'tcx> Predicate<'tcx> { /// Iterates over the types in this predicate. Note that in all /// cases this is skipping over a binder, so late-bound regions /// with depth 0 are bound by the predicate. - pub fn walk_tys(&self) -> IntoIter> { - let vec: Vec<_> = match *self { + pub fn walk_tys(&'a self) -> impl Iterator> + 'a { + match *self { ty::Predicate::Trait(ref data) => { - data.skip_binder().input_types().collect() + WalkTysIter::InputTypes(data.skip_binder().input_types()) } ty::Predicate::Subtype(binder) => { let SubtypePredicate { a, b, a_is_expected: _ } = binder.skip_binder(); - vec![a, b] + WalkTysIter::Two(a, b) } ty::Predicate::TypeOutlives(binder) => { - vec![binder.skip_binder().0] + WalkTysIter::One(binder.skip_binder().0) } ty::Predicate::RegionOutlives(..) 
=> { - vec![] + WalkTysIter::None } ty::Predicate::Projection(ref data) => { let inner = data.skip_binder(); - inner.projection_ty.substs.types().chain(Some(inner.ty)).collect() + WalkTysIter::ProjectionTypes( + inner.projection_ty.substs.types().chain(Some(inner.ty))) } ty::Predicate::WellFormed(data) => { - vec![data] + WalkTysIter::One(data) } ty::Predicate::ObjectSafe(_trait_def_id) => { - vec![] + WalkTysIter::None } ty::Predicate::ClosureKind(_closure_def_id, closure_substs, _kind) => { - closure_substs.substs.types().collect() + WalkTysIter::Types(closure_substs.substs.types()) } ty::Predicate::ConstEvaluatable(_, substs) => { - substs.types().collect() + WalkTysIter::Types(substs.types()) } - }; - - // FIXME: The only reason to collect into a vector here is that I was - // too lazy to make the full (somewhat complicated) iterator - // type that would be needed here. But I wanted this fn to - // return an iterator conceptually, rather than a `Vec`, so as - // to be closer to `Ty::walk`. - vec.into_iter() + } } pub fn to_opt_poly_trait_ref(&self) -> Option> { @@ -1520,12 +1569,19 @@ impl UniverseIndex { UniverseIndex::from_u32(self.private.checked_add(1).unwrap()) } - /// True if `self` can name a name from `other` -- in other words, + /// Returns `true` if `self` can name a name from `other` -- in other words, /// if the set of names in `self` is a superset of those in - /// `other`. + /// `other` (`self >= other`). pub fn can_name(self, other: UniverseIndex) -> bool { self.private >= other.private } + + /// Returns `true` if `self` cannot name some names from `other` -- in other + /// words, if the set of names in `self` is a strict subset of + /// those in `other` (`self < other`). + pub fn cannot_name(self, other: UniverseIndex) -> bool { + self.private < other.private + } } /// The "placeholder index" fully defines a placeholder region. @@ -1535,11 +1591,28 @@ impl UniverseIndex { /// universe are just two regions with an unknown relationship to one /// another. #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable, PartialOrd, Ord)] -pub struct Placeholder { +pub struct Placeholder { pub universe: UniverseIndex, - pub name: BoundRegion, + pub name: T, } +impl<'a, 'gcx, T> HashStable> for Placeholder + where T: HashStable> +{ + fn hash_stable( + &self, + hcx: &mut StableHashingContext<'a>, + hasher: &mut StableHasher + ) { + self.universe.hash_stable(hcx, hasher); + self.name.hash_stable(hcx, hasher); + } +} + +pub type PlaceholderRegion = Placeholder; + +pub type PlaceholderType = Placeholder; + /// When type checking, we use the `ParamEnv` to track /// details about the set of where-clauses that are in scope at this /// particular point. @@ -1561,6 +1634,7 @@ impl<'tcx> ParamEnv<'tcx> { /// there are no where clauses in scope. Hidden types (like `impl /// Trait`) are left hidden, so this is suitable for ordinary /// type-checking. + #[inline] pub fn empty() -> Self { Self::new(List::empty(), Reveal::UserFacing) } @@ -1570,13 +1644,15 @@ impl<'tcx> ParamEnv<'tcx> { /// are revealed. This is suitable for monomorphized, post-typeck /// environments like codegen or doing optimizations. /// - /// NB. If you want to have predicates in scope, use `ParamEnv::new`, + /// N.B. If you want to have predicates in scope, use `ParamEnv::new`, /// or invoke `param_env.with_reveal_all()`. + #[inline] pub fn reveal_all() -> Self { Self::new(List::empty(), Reveal::All) } /// Construct a trait environment with the given set of predicates. 
+ #[inline] pub fn new(caller_bounds: &'tcx List>, reveal: Reveal) -> Self { @@ -1620,7 +1696,7 @@ impl<'tcx> ParamEnv<'tcx> { } Reveal::All => { - if value.has_skol() + if value.has_placeholders() || value.needs_infer() || value.has_param_types() || value.has_self_ty() @@ -1779,7 +1855,7 @@ pub enum VariantDiscr { /// For efficiency reasons, the distance from the /// last `Explicit` discriminant is being stored, /// or `0` for the first variant, if it has none. - Relative(usize), + Relative(u32), } #[derive(Debug)] @@ -1795,7 +1871,7 @@ pub struct FieldDef { /// table. pub struct AdtDef { pub did: DefId, - pub variants: Vec, + pub variants: IndexVec, flags: AdtFlags, pub repr: ReprOptions, } @@ -1928,7 +2004,7 @@ impl ReprOptions { let mut max_align = 0; let mut min_pack = 0; for attr in tcx.get_attrs(did).iter() { - for r in attr::find_repr_attrs(tcx.sess.diagnostic(), attr) { + for r in attr::find_repr_attrs(&tcx.sess.parse_sess, attr) { flags.insert(match r { attr::ReprC => ReprFlags::IS_C, attr::ReprPacked(pack) => { @@ -1975,25 +2051,31 @@ impl ReprOptions { self.int.unwrap_or(attr::SignedInt(ast::IntTy::Isize)) } - /// Returns true if this `#[repr()]` should inhabit "smart enum + /// Returns `true` if this `#[repr()]` should inhabit "smart enum /// layout" optimizations, such as representing `Foo<&T>` as a /// single pointer. pub fn inhibit_enum_layout_opt(&self) -> bool { self.c() || self.int.is_some() } - /// Returns true if this `#[repr()]` should inhibit struct field reordering + /// Returns `true` if this `#[repr()]` should inhibit struct field reordering /// optimizations, such as with repr(C) or repr(packed(1)). pub fn inhibit_struct_field_reordering_opt(&self) -> bool { !(self.flags & ReprFlags::IS_UNOPTIMISABLE).is_empty() || (self.pack == 1) } + + /// Returns true if this `#[repr()]` should inhibit union abi optimisations + pub fn inhibit_union_abi_opt(&self) -> bool { + self.c() + } + } impl<'a, 'gcx, 'tcx> AdtDef { fn new(tcx: TyCtxt<'_, '_, '_>, did: DefId, kind: AdtKind, - variants: Vec, + variants: IndexVec, repr: ReprOptions) -> Self { debug!("AdtDef::new({:?}, {:?}, {:?}, {:?})", did, kind, variants, repr); let mut flags = AdtFlags::NO_ADT_FLAGS; @@ -2070,6 +2152,7 @@ impl<'a, 'gcx, 'tcx> AdtDef { } } + #[inline] pub fn variant_descr(&self) -> &'static str { match self.adt_kind() { AdtKind::Struct => "struct", @@ -2085,7 +2168,7 @@ impl<'a, 'gcx, 'tcx> AdtDef { self.flags.intersects(AdtFlags::IS_FUNDAMENTAL) } - /// Returns true if this is PhantomData. + /// Returns `true` if this is PhantomData. #[inline] pub fn is_phantom_data(&self) -> bool { self.flags.intersects(AdtFlags::IS_PHANTOM_DATA) @@ -2101,7 +2184,7 @@ impl<'a, 'gcx, 'tcx> AdtDef { self.flags.intersects(AdtFlags::IS_RC) } - /// Returns true if this is Box. + /// Returns `true` if this is Box. #[inline] pub fn is_box(&self) -> bool { self.flags.intersects(AdtFlags::IS_BOX) @@ -2115,11 +2198,11 @@ impl<'a, 'gcx, 'tcx> AdtDef { /// Asserts this is a struct or union and returns its unique variant. 
pub fn non_enum_variant(&self) -> &VariantDef { assert!(self.is_struct() || self.is_union()); - &self.variants[0] + &self.variants[VariantIdx::new(0)] } #[inline] - pub fn predicates(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> GenericPredicates<'gcx> { + pub fn predicates(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Lrc> { tcx.predicates_of(self.did) } @@ -2142,11 +2225,12 @@ impl<'a, 'gcx, 'tcx> AdtDef { .expect("variant_with_id: unknown variant") } - pub fn variant_index_with_id(&self, vid: DefId) -> usize { + pub fn variant_index_with_id(&self, vid: DefId) -> VariantIdx { self.variants - .iter() - .position(|v| v.did == vid) + .iter_enumerated() + .find(|(_, v)| v.did == vid) .expect("variant_index_with_id: unknown variant") + .0 } pub fn variant_of_def(&self, def: Def) -> &VariantDef { @@ -2191,11 +2275,7 @@ impl<'a, 'gcx, 'tcx> AdtDef { None } } - Err(err) => { - err.report_as_error( - tcx.at(tcx.def_span(expr_did)), - "could not evaluate enum discriminant", - ); + Err(ErrorHandled::Reported) => { if !expr_did.is_local() { span_bug!(tcx.def_span(expr_did), "variant discriminant evaluation succeeded \ @@ -2203,6 +2283,10 @@ impl<'a, 'gcx, 'tcx> AdtDef { } None } + Err(ErrorHandled::TooGeneric) => span_bug!( + tcx.def_span(expr_did), + "enum discriminant depends on generic arguments", + ), } } @@ -2210,11 +2294,11 @@ impl<'a, 'gcx, 'tcx> AdtDef { pub fn discriminants( &'a self, tcx: TyCtxt<'a, 'gcx, 'tcx>, - ) -> impl Iterator> + Captures<'gcx> + 'a { + ) -> impl Iterator)> + Captures<'gcx> + 'a { let repr_type = self.repr.discr_type(); let initial = repr_type.initial_discriminant(tcx.global_tcx()); let mut prev_discr = None::>; - self.variants.iter().map(move |v| { + self.variants.iter_enumerated().map(move |(i, v)| { let mut discr = prev_discr.map_or(initial, |d| d.wrap_incr(tcx)); if let VariantDiscr::Explicit(expr_did) = v.discr { if let Some(new_discr) = self.eval_explicit_discr(tcx, expr_did) { @@ -2223,7 +2307,7 @@ impl<'a, 'gcx, 'tcx> AdtDef { } prev_discr = Some(discr); - discr + (i, discr) }) } @@ -2234,7 +2318,7 @@ impl<'a, 'gcx, 'tcx> AdtDef { /// assuming there are no constant-evaluation errors there. pub fn discriminant_for_variant(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, - variant_index: usize) + variant_index: VariantIdx) -> Discr<'tcx> { let (val, offset) = self.discriminant_def_for_variant(variant_index); let explicit_value = val @@ -2248,12 +2332,12 @@ impl<'a, 'gcx, 'tcx> AdtDef { /// inferred discriminant directly pub fn discriminant_def_for_variant( &self, - variant_index: usize, - ) -> (Option, usize) { - let mut explicit_index = variant_index; + variant_index: VariantIdx, + ) -> (Option, u32) { + let mut explicit_index = variant_index.as_u32(); let expr_did; loop { - match self.variants[explicit_index].discr { + match self.variants[VariantIdx::from_u32(explicit_index)].discr { ty::VariantDiscr::Relative(0) => { expr_did = None; break; @@ -2267,7 +2351,7 @@ impl<'a, 'gcx, 'tcx> AdtDef { } } } - (expr_did, variant_index - explicit_index) + (expr_did, variant_index.as_u32() - explicit_index) } pub fn destructor(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Option { @@ -2361,14 +2445,16 @@ impl<'a, 'gcx, 'tcx> AdtDef { def_id: sized_trait, substs: tcx.mk_substs_trait(ty, &[]) }).to_predicate(); - let predicates = tcx.predicates_of(self.did).predicates; - if predicates.into_iter().any(|(p, _)| p == sized_predicate) { + let predicates = &tcx.predicates_of(self.did).predicates; + if predicates.iter().any(|(p, _)| *p == sized_predicate) { vec![] } else { vec![ty] } } + Placeholder(..) 
| + Bound(..) | Infer(..) => { bug!("unexpected type `{:?}` in sized_constraint_for_ty", ty) @@ -2387,7 +2473,7 @@ impl<'a, 'gcx, 'tcx> FieldDef { /// Represents the various closure traits in the Rust language. This /// will determine the type of the environment (`self`, in the -/// desuaring) argument that the closure expects. +/// desugaring) argument that the closure expects. /// /// You can get the environment type of a closure using /// `tcx.closure_env_ty()`. @@ -2417,7 +2503,7 @@ impl<'a, 'tcx> ClosureKind { } } - /// True if this a type that impls this closure kind + /// Returns `true` if this a type that impls this closure kind /// must also implement `other`. pub fn extends(self, other: ty::ClosureKind) -> bool { match (self, other) { @@ -2470,7 +2556,7 @@ impl<'tcx> TyS<'tcx> { /// /// Note: prefer `ty.walk()` where possible. pub fn maybe_walk(&'tcx self, mut f: F) - where F : FnMut(Ty<'tcx>) -> bool + where F: FnMut(Ty<'tcx>) -> bool { let mut walker = self.walk(); while let Some(ty) = walker.next() { @@ -2667,13 +2753,20 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { pub fn associated_items( self, def_id: DefId, - ) -> impl Iterator + 'a { - let def_ids = self.associated_item_def_ids(def_id); - Box::new((0..def_ids.len()).map(move |i| self.associated_item(def_ids[i]))) - as Box + 'a> + ) -> AssociatedItemsIterator<'a, 'gcx, 'tcx> { + // Ideally, we would use `-> impl Iterator` here, but it falls + // afoul of the conservative "capture [restrictions]" we put + // in place, so we use a hand-written iterator. + // + // [restrictions]: https://github.com/rust-lang/rust/issues/34511#issuecomment-373423999 + AssociatedItemsIterator { + tcx: self, + def_ids: self.associated_item_def_ids(def_id), + next_index: 0, + } } - /// Returns true if the impls are the same polarity and the trait either + /// Returns `true` if the impls are the same polarity and the trait either /// has no items or is annotated #[marker] and prevents item overrides. pub fn impls_are_allowed_to_overlap(self, def_id1: DefId, def_id2: DefId) -> bool { if self.features().overlapping_marker_traits { @@ -2761,6 +2854,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { ty::InstanceDef::Item(did) => { self.optimized_mir(did) } + ty::InstanceDef::VtableShim(..) | ty::InstanceDef::Intrinsic(..) | ty::InstanceDef::FnPtrShim(..) | ty::InstanceDef::Virtual(..) | @@ -2791,12 +2885,12 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { } } - /// Determine whether an item is annotated with an attribute + /// Determine whether an item is annotated with an attribute. pub fn has_attr(self, did: DefId, attr: &str) -> bool { attr::contains_name(&self.get_attrs(did), attr) } - /// Returns true if this is an `auto trait`. + /// Returns `true` if this is an `auto trait`. pub fn trait_is_auto(self, trait_def_id: DefId) -> bool { self.trait_def(trait_def_id).has_auto_impl } @@ -2805,14 +2899,14 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { self.optimized_mir(def_id).generator_layout.as_ref().unwrap() } - /// Given the def_id of an impl, return the def_id of the trait it implements. + /// Given the def-id of an impl, return the def_id of the trait it implements. /// If it implements no trait, return `None`. pub fn trait_id_of_impl(self, def_id: DefId) -> Option { self.impl_trait_ref(def_id).map(|tr| tr.def_id) } - /// If the given def ID describes a method belonging to an impl, return the - /// ID of the impl that the method belongs to. Otherwise, return `None`. 
+ /// If the given defid describes a method belonging to an impl, return the + /// def-id of the impl that the method belongs to. Otherwise, return `None`. pub fn impl_of_method(self, def_id: DefId) -> Option { let item = if def_id.krate != LOCAL_CRATE { if let Some(Def::Method(_)) = self.describe_def(def_id) { @@ -2866,6 +2960,22 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { } } +pub struct AssociatedItemsIterator<'a, 'gcx: 'tcx, 'tcx: 'a> { + tcx: TyCtxt<'a, 'gcx, 'tcx>, + def_ids: Lrc>, + next_index: usize, +} + +impl Iterator for AssociatedItemsIterator<'_, '_, '_> { + type Item = AssociatedItem; + + fn next(&mut self) -> Option { + let def_id = self.def_ids.get(self.next_index)?; + self.next_index += 1; + Some(self.tcx.associated_item(*def_id)) + } +} + impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { pub fn with_freevars(self, fid: NodeId, f: F) -> T where F: FnOnce(&[hir::Freevar]) -> T, @@ -2977,7 +3087,7 @@ fn trait_of_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> Option }) } -/// Yields the parent function's `DefId` if `def_id` is an `impl Trait` definition +/// Yields the parent function's `DefId` if `def_id` is an `impl Trait` definition. pub fn is_impl_trait_defn(tcx: TyCtxt<'_, '_, '_>, def_id: DefId) -> Option { if let Some(node_id) = tcx.hir.as_local_node_id(def_id) { if let Node::Item(item) = tcx.hir.get(node_id) { @@ -2989,7 +3099,19 @@ pub fn is_impl_trait_defn(tcx: TyCtxt<'_, '_, '_>, def_id: DefId) -> Option, def_id: DefId) -> bool { + if let Some(node_id) = tcx.hir.as_local_node_id(def_id) { + if let Node::Item(item) = tcx.hir.get(node_id) { + if let hir::ItemKind::TraitAlias(..) = item.node { + return true; + } + } + } + false +} + +/// See `ParamEnv` struct definition for details. fn param_env<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> ParamEnv<'tcx> @@ -3085,7 +3207,7 @@ pub fn provide(providers: &mut ty::query::Providers<'_>) { /// rather, you should request the vector for a specific type via /// `tcx.inherent_impls(def_id)` so as to minimize your dependencies /// (constructing this map requires touching the entire crate). -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Default)] pub struct CrateInherentImpls { pub inherent_impls: DefIdMap>>, } diff --git a/src/librustc/ty/outlives.rs b/src/librustc/ty/outlives.rs index b49664b624..0e3fc62e4c 100644 --- a/src/librustc/ty/outlives.rs +++ b/src/librustc/ty/outlives.rs @@ -12,6 +12,7 @@ // refers to rules defined in RFC 1214 (`OutlivesFooBar`), so see that // RFC for reference. +use smallvec::SmallVec; use ty::{self, Ty, TyCtxt, TypeFoldable}; #[derive(Debug)] @@ -55,17 +56,15 @@ pub enum Component<'tcx> { } impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { - /// Returns all the things that must outlive `'a` for the condition + /// Push onto `out` all the things that must outlive `'a` for the condition /// `ty0: 'a` to hold. Note that `ty0` must be a **fully resolved type**. 
- pub fn outlives_components(&self, ty0: Ty<'tcx>) - -> Vec> { - let mut components = vec![]; - self.compute_components(ty0, &mut components); - debug!("components({:?}) = {:?}", ty0, components); - components + pub fn push_outlives_components(&self, ty0: Ty<'tcx>, + out: &mut SmallVec<[Component<'tcx>; 4]>) { + self.compute_components(ty0, out); + debug!("components({:?}) = {:?}", ty0, out); } - fn compute_components(&self, ty: Ty<'tcx>, out: &mut Vec>) { + fn compute_components(&self, ty: Ty<'tcx>, out: &mut SmallVec<[Component<'tcx>; 4]>) { // Descend through the types, looking for the various "base" // components and collecting them into `out`. This is not written // with `collect()` because of the need to sometimes skip subtrees @@ -106,7 +105,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { // we simply fallback to the most restrictive rule, which // requires that `Pi: 'a` for all `i`. ty::Projection(ref data) => { - if !data.has_escaping_regions() { + if !data.has_escaping_bound_vars() { // best case: no escaping regions, so push the // projection and skip the subtree (thus generating no // constraints for Pi). This defers the choice between @@ -156,6 +155,8 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { ty::FnDef(..) | // OutlivesFunction (*) ty::FnPtr(_) | // OutlivesFunction (*) ty::Dynamic(..) | // OutlivesObject, OutlivesFragment (*) + ty::Placeholder(..) | + ty::Bound(..) | ty::Error => { // (*) Bare functions and traits are both binders. In the // RFC, this means we would add the bound regions to the @@ -163,7 +164,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { // list is maintained explicitly, because bound regions // themselves can be readily identified. - push_region_constraints(out, ty.regions()); + push_region_constraints(ty, out); for subty in ty.walk_shallow() { self.compute_components(subty, out); } @@ -172,15 +173,17 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { } fn capture_components(&self, ty: Ty<'tcx>) -> Vec> { - let mut temp = vec![]; - push_region_constraints(&mut temp, ty.regions()); + let mut temp = smallvec![]; + push_region_constraints(ty, &mut temp); for subty in ty.walk_shallow() { self.compute_components(subty, &mut temp); } - temp + temp.into_iter().collect() } } -fn push_region_constraints<'tcx>(out: &mut Vec>, regions: Vec>) { +fn push_region_constraints<'tcx>(ty: Ty<'tcx>, out: &mut SmallVec<[Component<'tcx>; 4]>) { + let mut regions = smallvec![]; + ty.push_regions(&mut regions); out.extend(regions.iter().filter(|&r| !r.is_late_bound()).map(|r| Component::Region(r))); } diff --git a/src/librustc/ty/query/config.rs b/src/librustc/ty/query/config.rs index 0f6ff93c52..5d12aaeed5 100644 --- a/src/librustc/ty/query/config.rs +++ b/src/librustc/ty/query/config.rs @@ -296,6 +296,30 @@ impl<'tcx> QueryDescription<'tcx> for queries::reachable_set<'tcx> { } impl<'tcx> QueryDescription<'tcx> for queries::const_eval<'tcx> { + fn describe( + tcx: TyCtxt<'_, '_, '_>, + key: ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>, + ) -> Cow<'static, str> { + format!( + "const-evaluating + checking `{}`", + tcx.item_path_str(key.value.instance.def.def_id()), + ).into() + } + + #[inline] + fn cache_on_disk(_key: Self::Key) -> bool { + true + } + + #[inline] + fn try_load_from_disk<'a>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + id: SerializedDepNodeIndex) + -> Option { + tcx.queries.on_disk_cache.try_load_query_result(tcx, id).map(Ok) + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::const_eval_raw<'tcx> { fn describe(tcx: TyCtxt<'_, '_, '_>, key: ty::ParamEnvAnd<'tcx, 
GlobalId<'tcx>>) -> Cow<'static, str> { @@ -567,7 +591,7 @@ impl<'tcx> QueryDescription<'tcx> for queries::plugin_registrar_fn<'tcx> { } } -impl<'tcx> QueryDescription<'tcx> for queries::derive_registrar_fn<'tcx> { +impl<'tcx> QueryDescription<'tcx> for queries::proc_macro_decls_static<'tcx> { fn describe(_tcx: TyCtxt<'_, '_, '_>, _: CrateNum) -> Cow<'static, str> { "looking up the derive registrar for a crate".into() } diff --git a/src/librustc/ty/query/mod.rs b/src/librustc/ty/query/mod.rs index a59a15da08..699c2d111c 100644 --- a/src/librustc/ty/query/mod.rs +++ b/src/librustc/ty/query/mod.rs @@ -27,7 +27,7 @@ use middle::stability::{self, DeprecationEntry}; use middle::lib_features::LibFeatures; use middle::lang_items::{LanguageItems, LangItem}; use middle::exported_symbols::{SymbolExportLevel, ExportedSymbol}; -use mir::interpret::ConstEvalResult; +use mir::interpret::{ConstEvalRawResult, ConstEvalResult}; use mir::mono::CodegenUnit; use mir; use mir::interpret::GlobalId; @@ -127,17 +127,18 @@ define_queries! { <'tcx> /// predicate gets in the way of some checks, which are intended /// to operate over only the actual where-clauses written by the /// user.) - [] fn predicates_of: PredicatesOfItem(DefId) -> ty::GenericPredicates<'tcx>, + [] fn predicates_of: PredicatesOfItem(DefId) -> Lrc>, /// Maps from the def-id of an item (trait/struct/enum/fn) to the /// predicates (where clauses) directly defined on it. This is /// equal to the `explicit_predicates_of` predicates plus the /// `inferred_outlives_of` predicates. - [] fn predicates_defined_on: PredicatesDefinedOnItem(DefId) -> ty::GenericPredicates<'tcx>, + [] fn predicates_defined_on: PredicatesDefinedOnItem(DefId) + -> Lrc>, /// Returns the predicates written explicit by the user. [] fn explicit_predicates_of: ExplicitPredicatesOfItem(DefId) - -> ty::GenericPredicates<'tcx>, + -> Lrc>, /// Returns the inferred outlives predicates (e.g., for `struct /// Foo<'a, T> { x: &'a T }`, this would return `T: 'a`). @@ -149,12 +150,12 @@ define_queries! { <'tcx> /// evaluate them even during type conversion, often before the /// full predicates are available (note that supertraits have /// additional acyclicity requirements). - [] fn super_predicates_of: SuperPredicatesOfItem(DefId) -> ty::GenericPredicates<'tcx>, + [] fn super_predicates_of: SuperPredicatesOfItem(DefId) -> Lrc>, /// To avoid cycles within the predicates of a single item we compute /// per-type-parameter predicates for resolving `T::AssocTy`. [] fn type_param_predicates: type_param_predicates((DefId, DefId)) - -> ty::GenericPredicates<'tcx>, + -> Lrc>, [] fn trait_def: TraitDefOfItem(DefId) -> &'tcx ty::TraitDef, [] fn adt_def: AdtDefOfItem(DefId) -> &'tcx ty::AdtDef, @@ -291,7 +292,8 @@ define_queries! { <'tcx> /// Gets a complete map from all types to their inherent impls. /// Not meant to be used directly outside of coherence. /// (Defined only for LOCAL_CRATE) - [] fn crate_inherent_impls: crate_inherent_impls_dep_node(CrateNum) -> CrateInherentImpls, + [] fn crate_inherent_impls: crate_inherent_impls_dep_node(CrateNum) + -> Lrc, /// Checks all types in the krate for overlap in their inherent impls. Reports errors. /// Not meant to be used directly outside of coherence. @@ -301,6 +303,14 @@ define_queries! { <'tcx> }, Other { + /// Evaluate a constant without running sanity checks + /// + /// DO NOT USE THIS outside const eval. Const eval uses this to break query cycles during + /// validation. 
Please add a comment to every use site explaining why using `const_eval` + /// isn't sufficient + [] fn const_eval_raw: const_eval_raw_dep_node(ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>) + -> ConstEvalRawResult<'tcx>, + /// Results of evaluating const items or constants embedded in /// other items (such as enum variant explicit discriminants). [] fn const_eval: const_eval_dep_node(ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>) @@ -460,7 +470,7 @@ define_queries! { <'tcx> [] fn foreign_modules: ForeignModules(CrateNum) -> Lrc>, [] fn plugin_registrar_fn: PluginRegistrarFn(CrateNum) -> Option, - [] fn derive_registrar_fn: DeriveRegistrarFn(CrateNum) -> Option, + [] fn proc_macro_decls_static: ProcMacroDeclsStatic(CrateNum) -> Option, [] fn crate_disambiguator: CrateDisambiguator(CrateNum) -> CrateDisambiguator, [] fn crate_hash: CrateHash(CrateNum) -> Svh, [] fn original_crate_name: OriginalCrateName(CrateNum) -> Symbol, @@ -679,7 +689,7 @@ define_queries! { <'tcx> ) -> Clauses<'tcx>, // Get the chalk-style environment of the given item. - [] fn environment: Environment(DefId) -> traits::Environment<'tcx>, + [] fn environment: Environment(DefId) -> ty::Binder>, }, Linking { @@ -776,6 +786,10 @@ fn const_eval_dep_node<'tcx>(param_env: ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>) -> DepConstructor<'tcx> { DepConstructor::ConstEval { param_env } } +fn const_eval_raw_dep_node<'tcx>(param_env: ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>) + -> DepConstructor<'tcx> { + DepConstructor::ConstEvalRaw { param_env } +} fn mir_keys<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { DepConstructor::MirKeys diff --git a/src/librustc/ty/query/on_disk_cache.rs b/src/librustc/ty/query/on_disk_cache.rs index 636720bf3c..7d3ae64f4f 100644 --- a/src/librustc/ty/query/on_disk_cache.rs +++ b/src/librustc/ty/query/on_disk_cache.rs @@ -25,7 +25,7 @@ use rustc_serialize::{Decodable, Decoder, Encodable, Encoder, opaque, use session::{CrateDisambiguator, Session}; use std::mem; use syntax::ast::NodeId; -use syntax::source_map::{SourceMap, StableFilemapId}; +use syntax::source_map::{SourceMap, StableSourceFileId}; use syntax_pos::{BytePos, Span, DUMMY_SP, SourceFile}; use syntax_pos::hygiene::{Mark, SyntaxContext, ExpnInfo}; use ty; @@ -62,7 +62,7 @@ pub struct OnDiskCache<'sess> { cnum_map: Once>>, source_map: &'sess SourceMap, - file_index_to_stable_id: FxHashMap, + file_index_to_stable_id: FxHashMap, // These two fields caches that are populated lazily during decoding. file_index_to_file: Lock>>, @@ -82,7 +82,7 @@ pub struct OnDiskCache<'sess> { // This type is used only for (de-)serialization. 
#[derive(RustcEncodable, RustcDecodable)] struct Footer { - file_index_to_stable_id: FxHashMap, + file_index_to_stable_id: FxHashMap, prev_cnums: Vec<(u32, String, CrateDisambiguator)>, query_result_index: EncodedQueryResultIndex, diagnostics_index: EncodedQueryResultIndex, @@ -174,14 +174,17 @@ impl<'sess> OnDiskCache<'sess> { tcx.dep_graph.with_ignore(|| { // Allocate SourceFileIndices let (file_to_file_index, file_index_to_stable_id) = { - let mut file_to_file_index = FxHashMap::default(); - let mut file_index_to_stable_id = FxHashMap::default(); + let files = tcx.sess.source_map().files(); + let mut file_to_file_index = FxHashMap::with_capacity_and_hasher( + files.len(), Default::default()); + let mut file_index_to_stable_id = FxHashMap::with_capacity_and_hasher( + files.len(), Default::default()); - for (index, file) in tcx.sess.source_map().files().iter().enumerate() { + for (index, file) in files.iter().enumerate() { let index = SourceFileIndex(index as u32); let file_ptr: *const SourceFile = &**file as *const _; file_to_file_index.insert(file_ptr, index); - file_index_to_stable_id.insert(index, StableFilemapId::new(&file)); + file_index_to_stable_id.insert(index, StableSourceFileId::new(&file)); } (file_to_file_index, file_index_to_stable_id) @@ -278,7 +281,7 @@ impl<'sess> OnDiskCache<'sess> { // otherwise, abort break; } - interpret_alloc_index.reserve(new_n); + interpret_alloc_index.reserve(new_n - n); for idx in n..new_n { let id = encoder.interpret_allocs_inverse[idx]; let pos = encoder.position() as u32; @@ -447,8 +450,7 @@ impl<'sess> OnDiskCache<'sess> { .map(|&(cnum, ..)| cnum) .max() .unwrap_or(0) + 1; - let mut map = IndexVec::new(); - map.resize(map_size as usize, None); + let mut map = IndexVec::from_elem_n(None, map_size as usize); for &(prev_cnum, ref crate_name, crate_disambiguator) in prev_cnums { let key = (crate_name.clone(), crate_disambiguator); @@ -473,7 +475,7 @@ struct CacheDecoder<'a, 'tcx: 'a, 'x> { cnum_map: &'x IndexVec>, synthetic_expansion_infos: &'x Lock>, file_index_to_file: &'x Lock>>, - file_index_to_stable_id: &'x FxHashMap, + file_index_to_stable_id: &'x FxHashMap, alloc_decoding_session: AllocDecodingSession<'x>, } diff --git a/src/librustc/ty/query/plumbing.rs b/src/librustc/ty/query/plumbing.rs index 789658dcf7..5f33d466c4 100644 --- a/src/librustc/ty/query/plumbing.rs +++ b/src/librustc/ty/query/plumbing.rs @@ -100,7 +100,7 @@ pub(super) struct JobOwner<'a, 'tcx: 'a, Q: QueryDescription<'tcx> + 'a> { } impl<'a, 'tcx, Q: QueryDescription<'tcx>> JobOwner<'a, 'tcx, Q> { - /// Either gets a JobOwner corresponding the the query, allowing us to + /// Either gets a JobOwner corresponding the query, allowing us to /// start executing the query, or it returns with the result of the query. /// If the query is executing elsewhere, this will wait for it. /// If the query panicked, this will silently panic. @@ -314,7 +314,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { /// Try to read a node index for the node dep_node. /// A node will have an index, when it's already been marked green, or when we can mark it /// green. This function will mark the current task as a reader of the specified node, when - /// the a node index can be found for that node. + /// a node index can be found for that node. 
pub(super) fn try_mark_green_and_read(self, dep_node: &DepNode) -> Option { match self.dep_graph.node_color(dep_node) { Some(DepNodeColor::Green(dep_node_index)) => { @@ -1063,6 +1063,7 @@ pub fn force_from_dep_node<'a, 'gcx, 'lcx>(tcx: TyCtxt<'a, 'gcx, 'lcx>, DepKind::NeedsDrop | DepKind::Layout | DepKind::ConstEval | + DepKind::ConstEvalRaw | DepKind::InstanceSymbolName | DepKind::MirShim | DepKind::BorrowCheckKrate | @@ -1199,7 +1200,7 @@ pub fn force_from_dep_node<'a, 'gcx, 'lcx>(tcx: TyCtxt<'a, 'gcx, 'lcx>, DepKind::ReachableNonGenerics => { force!(reachable_non_generics, krate!()); } DepKind::NativeLibraries => { force!(native_libraries, krate!()); } DepKind::PluginRegistrarFn => { force!(plugin_registrar_fn, krate!()); } - DepKind::DeriveRegistrarFn => { force!(derive_registrar_fn, krate!()); } + DepKind::ProcMacroDeclsStatic => { force!(proc_macro_decls_static, krate!()); } DepKind::CrateDisambiguator => { force!(crate_disambiguator, krate!()); } DepKind::CrateHash => { force!(crate_hash, krate!()); } DepKind::OriginalCrateName => { force!(original_crate_name, krate!()); } diff --git a/src/librustc/ty/relate.rs b/src/librustc/ty/relate.rs index 082c1bd5fe..1b64a68679 100644 --- a/src/librustc/ty/relate.rs +++ b/src/librustc/ty/relate.rs @@ -25,6 +25,7 @@ use std::rc::Rc; use std::iter; use rustc_target::spec::abi; use hir as ast; +use traits; pub type RelateResult<'tcx, T> = Result>; @@ -371,6 +372,10 @@ pub fn super_relate_tys<'a, 'gcx, 'tcx, R>(relation: &mut R, bug!("var types encountered in super_relate_tys") } + (ty::Bound(..), _) | (_, ty::Bound(..)) => { + bug!("bound types encountered in super_relate_tys") + } + (&ty::Error, _) | (_, &ty::Error) => { Ok(tcx.types.err) @@ -394,6 +399,10 @@ pub fn super_relate_tys<'a, 'gcx, 'tcx, R>(relation: &mut R, Ok(a) } + (ty::Placeholder(p1), ty::Placeholder(p2)) if p1 == p2 => { + Ok(a) + } + (&ty::Adt(a_def, a_substs), &ty::Adt(b_def, b_substs)) if a_def == b_def => { @@ -556,8 +565,13 @@ pub fn super_relate_tys<'a, 'gcx, 'tcx, R>(relation: &mut R, Ok(tcx.mk_fn_ptr(fty)) } - (&ty::Projection(ref a_data), &ty::Projection(ref b_data)) => - { + (ty::UnnormalizedProjection(a_data), ty::UnnormalizedProjection(b_data)) => { + let projection_ty = relation.relate(a_data, b_data)?; + Ok(tcx.mk_ty(ty::UnnormalizedProjection(projection_ty))) + } + + // these two are already handled downstream in case of lazy normalization + (ty::Projection(a_data), ty::Projection(b_data)) => { let projection_ty = relation.relate(a_data, b_data)?; Ok(tcx.mk_projection(projection_ty.item_def_id, projection_ty.substs)) } @@ -710,6 +724,283 @@ impl<'tcx> Relate<'tcx> for Kind<'tcx> { } } +impl<'tcx> Relate<'tcx> for ty::TraitPredicate<'tcx> { + fn relate<'a, 'gcx, R>( + relation: &mut R, + a: &ty::TraitPredicate<'tcx>, + b: &ty::TraitPredicate<'tcx> + ) -> RelateResult<'tcx, ty::TraitPredicate<'tcx>> + where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'tcx, 'tcx: 'a + { + Ok(ty::TraitPredicate { + trait_ref: relation.relate(&a.trait_ref, &b.trait_ref)?, + }) + } +} + +impl<'tcx> Relate<'tcx> for ty::ProjectionPredicate<'tcx> { + fn relate<'a, 'gcx, R>( + relation: &mut R, + a: &ty::ProjectionPredicate<'tcx>, + b: &ty::ProjectionPredicate<'tcx>, + ) -> RelateResult<'tcx, ty::ProjectionPredicate<'tcx>> + where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'tcx, 'tcx: 'a + { + Ok(ty::ProjectionPredicate { + projection_ty: relation.relate(&a.projection_ty, &b.projection_ty)?, + ty: relation.relate(&a.ty, &b.ty)?, + }) + } +} + +impl<'tcx> Relate<'tcx> for traits::WhereClause<'tcx> 
{ + fn relate<'a, 'gcx, R>( + relation: &mut R, + a: &traits::WhereClause<'tcx>, + b: &traits::WhereClause<'tcx> + ) -> RelateResult<'tcx, traits::WhereClause<'tcx>> + where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'tcx, 'tcx: 'a + { + use traits::WhereClause::*; + match (a, b) { + (Implemented(a_pred), Implemented(b_pred)) => { + Ok(Implemented(relation.relate(a_pred, b_pred)?)) + } + + (ProjectionEq(a_pred), ProjectionEq(b_pred)) => { + Ok(ProjectionEq(relation.relate(a_pred, b_pred)?)) + } + + (RegionOutlives(a_pred), RegionOutlives(b_pred)) => { + Ok(RegionOutlives(ty::OutlivesPredicate( + relation.relate(&a_pred.0, &b_pred.0)?, + relation.relate(&a_pred.1, &b_pred.1)?, + ))) + } + + (TypeOutlives(a_pred), TypeOutlives(b_pred)) => { + Ok(TypeOutlives(ty::OutlivesPredicate( + relation.relate(&a_pred.0, &b_pred.0)?, + relation.relate(&a_pred.1, &b_pred.1)?, + ))) + } + + _ => Err(TypeError::Mismatch), + } + } +} + +impl<'tcx> Relate<'tcx> for traits::WellFormed<'tcx> { + fn relate<'a, 'gcx, R>( + relation: &mut R, + a: &traits::WellFormed<'tcx>, + b: &traits::WellFormed<'tcx> + ) -> RelateResult<'tcx, traits::WellFormed<'tcx>> + where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'tcx, 'tcx: 'a + { + use traits::WellFormed::*; + match (a, b) { + (Trait(a_pred), Trait(b_pred)) => Ok(Trait(relation.relate(a_pred, b_pred)?)), + (Ty(a_ty), Ty(b_ty)) => Ok(Ty(relation.relate(a_ty, b_ty)?)), + _ => Err(TypeError::Mismatch), + } + } +} + +impl<'tcx> Relate<'tcx> for traits::FromEnv<'tcx> { + fn relate<'a, 'gcx, R>( + relation: &mut R, + a: &traits::FromEnv<'tcx>, + b: &traits::FromEnv<'tcx> + ) -> RelateResult<'tcx, traits::FromEnv<'tcx>> + where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'tcx, 'tcx: 'a + { + use traits::FromEnv::*; + match (a, b) { + (Trait(a_pred), Trait(b_pred)) => Ok(Trait(relation.relate(a_pred, b_pred)?)), + (Ty(a_ty), Ty(b_ty)) => Ok(Ty(relation.relate(a_ty, b_ty)?)), + _ => Err(TypeError::Mismatch), + } + } +} + +impl<'tcx> Relate<'tcx> for traits::DomainGoal<'tcx> { + fn relate<'a, 'gcx, R>( + relation: &mut R, + a: &traits::DomainGoal<'tcx>, + b: &traits::DomainGoal<'tcx> + ) -> RelateResult<'tcx, traits::DomainGoal<'tcx>> + where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'tcx, 'tcx: 'a + { + use traits::DomainGoal::*; + match (a, b) { + (Holds(a_wc), Holds(b_wc)) => Ok(Holds(relation.relate(a_wc, b_wc)?)), + (WellFormed(a_wf), WellFormed(b_wf)) => Ok(WellFormed(relation.relate(a_wf, b_wf)?)), + (FromEnv(a_fe), FromEnv(b_fe)) => Ok(FromEnv(relation.relate(a_fe, b_fe)?)), + + (Normalize(a_pred), Normalize(b_pred)) => { + Ok(Normalize(relation.relate(a_pred, b_pred)?)) + } + + _ => Err(TypeError::Mismatch), + } + } +} + +impl<'tcx> Relate<'tcx> for traits::Goal<'tcx> { + fn relate<'a, 'gcx, R>( + relation: &mut R, + a: &traits::Goal<'tcx>, + b: &traits::Goal<'tcx> + ) -> RelateResult<'tcx, traits::Goal<'tcx>> + where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'tcx, 'tcx: 'a + { + use traits::GoalKind::*; + match (a, b) { + (Implies(a_clauses, a_goal), Implies(b_clauses, b_goal)) => { + let clauses = relation.relate(a_clauses, b_clauses)?; + let goal = relation.relate(a_goal, b_goal)?; + Ok(relation.tcx().mk_goal(Implies(clauses, goal))) + } + + (And(a_left, a_right), And(b_left, b_right)) => { + let left = relation.relate(a_left, b_left)?; + let right = relation.relate(a_right, b_right)?; + Ok(relation.tcx().mk_goal(And(left, right))) + } + + (Not(a_goal), Not(b_goal)) => { + let goal = relation.relate(a_goal, b_goal)?; + Ok(relation.tcx().mk_goal(Not(goal))) + } + + (DomainGoal(a_goal), 
DomainGoal(b_goal)) => { + let goal = relation.relate(a_goal, b_goal)?; + Ok(relation.tcx().mk_goal(DomainGoal(goal))) + } + + (Quantified(a_qkind, a_goal), Quantified(b_qkind, b_goal)) + if a_qkind == b_qkind => + { + let goal = relation.relate(a_goal, b_goal)?; + Ok(relation.tcx().mk_goal(Quantified(*a_qkind, goal))) + } + + (CannotProve, CannotProve) => Ok(*a), + + _ => Err(TypeError::Mismatch), + } + } +} + +impl<'tcx> Relate<'tcx> for traits::Goals<'tcx> { + fn relate<'a, 'gcx, R>( + relation: &mut R, + a: &traits::Goals<'tcx>, + b: &traits::Goals<'tcx> + ) -> RelateResult<'tcx, traits::Goals<'tcx>> + where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'tcx, 'tcx: 'a + { + if a.len() != b.len() { + return Err(TypeError::Mismatch); + } + + let tcx = relation.tcx(); + let goals = a.iter().zip(b.iter()).map(|(a, b)| relation.relate(a, b)); + Ok(tcx.mk_goals(goals)?) + } +} + +impl<'tcx> Relate<'tcx> for traits::Clause<'tcx> { + fn relate<'a, 'gcx, R>( + relation: &mut R, + a: &traits::Clause<'tcx>, + b: &traits::Clause<'tcx> + ) -> RelateResult<'tcx, traits::Clause<'tcx>> + where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'tcx, 'tcx: 'a + { + use traits::Clause::*; + match (a, b) { + (Implies(a_clause), Implies(b_clause)) => { + let clause = relation.relate(a_clause, b_clause)?; + Ok(Implies(clause)) + } + + (ForAll(a_clause), ForAll(b_clause)) => { + let clause = relation.relate(a_clause, b_clause)?; + Ok(ForAll(clause)) + } + + _ => Err(TypeError::Mismatch), + } + } +} + +impl<'tcx> Relate<'tcx> for traits::Clauses<'tcx> { + fn relate<'a, 'gcx, R>( + relation: &mut R, + a: &traits::Clauses<'tcx>, + b: &traits::Clauses<'tcx> + ) -> RelateResult<'tcx, traits::Clauses<'tcx>> + where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'tcx, 'tcx: 'a + { + if a.len() != b.len() { + return Err(TypeError::Mismatch); + } + + let tcx = relation.tcx(); + let clauses = a.iter().zip(b.iter()).map(|(a, b)| relation.relate(a, b)); + Ok(tcx.mk_clauses(clauses)?) 
+ } +} + +impl<'tcx> Relate<'tcx> for traits::ProgramClause<'tcx> { + fn relate<'a, 'gcx, R>( + relation: &mut R, + a: &traits::ProgramClause<'tcx>, + b: &traits::ProgramClause<'tcx> + ) -> RelateResult<'tcx, traits::ProgramClause<'tcx>> + where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'tcx, 'tcx: 'a + { + Ok(traits::ProgramClause { + goal: relation.relate(&a.goal, &b.goal)?, + hypotheses: relation.relate(&a.hypotheses, &b.hypotheses)?, + category: traits::ProgramClauseCategory::Other, + }) + } +} + +impl<'tcx> Relate<'tcx> for traits::Environment<'tcx> { + fn relate<'a, 'gcx, R>( + relation: &mut R, + a: &traits::Environment<'tcx>, + b: &traits::Environment<'tcx> + ) -> RelateResult<'tcx, traits::Environment<'tcx>> + where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'tcx, 'tcx: 'a + { + Ok(traits::Environment { + clauses: relation.relate(&a.clauses, &b.clauses)?, + }) + } +} + +impl<'tcx, G> Relate<'tcx> for traits::InEnvironment<'tcx, G> + where G: Relate<'tcx> +{ + fn relate<'a, 'gcx, R>( + relation: &mut R, + a: &traits::InEnvironment<'tcx, G>, + b: &traits::InEnvironment<'tcx, G> + ) -> RelateResult<'tcx, traits::InEnvironment<'tcx, G>> + where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'tcx, 'tcx: 'a + { + Ok(traits::InEnvironment { + environment: relation.relate(&a.environment, &b.environment)?, + goal: relation.relate(&a.goal, &b.goal)?, + }) + } +} + /////////////////////////////////////////////////////////////////////////// // Error handling diff --git a/src/librustc/ty/steal.rs b/src/librustc/ty/steal.rs index 1092e23ec3..fc3353e339 100644 --- a/src/librustc/ty/steal.rs +++ b/src/librustc/ty/steal.rs @@ -9,7 +9,6 @@ // except according to those terms. use rustc_data_structures::sync::{RwLock, ReadGuard, MappedReadGuard}; -use std::mem; /// The `Steal` struct is intended to used as the value for a query. /// Specifically, we sometimes have queries (*cough* MIR *cough*) @@ -51,7 +50,7 @@ impl Steal { pub fn steal(&self) -> T { let value_ref = &mut *self.value.try_write().expect("stealing value which is locked"); - let value = mem::replace(value_ref, None); + let value = value_ref.take(); value.expect("attempt to read from stolen value") } } diff --git a/src/librustc/ty/structural_impls.rs b/src/librustc/ty/structural_impls.rs index e86a790b50..d6aeb288b5 100644 --- a/src/librustc/ty/structural_impls.rs +++ b/src/librustc/ty/structural_impls.rs @@ -14,7 +14,7 @@ //! `BraceStructLiftImpl!`) to help with the tedium. use mir::ProjectionKind; -use mir::interpret::{ConstValue, ConstEvalErr}; +use mir::interpret::ConstValue; use ty::{self, Lift, Ty, TyCtxt}; use ty::fold::{TypeFoldable, TypeFolder, TypeVisitor}; use rustc_data_structures::indexed_vec::{IndexVec, Idx}; @@ -33,6 +33,7 @@ CloneTypeFoldableAndLiftImpls! 
{ (), bool, usize, + ::ty::layout::VariantIdx, u64, ::middle::region::Scope, ::syntax::ast::FloatTy, @@ -455,176 +456,19 @@ impl<'a, 'tcx> Lift<'tcx> for ty::error::TypeError<'a> { ProjectionMismatched(x) => ProjectionMismatched(x), ProjectionBoundsLength(x) => ProjectionBoundsLength(x), Sorts(ref x) => return tcx.lift(x).map(Sorts), - OldStyleLUB(ref x) => return tcx.lift(x).map(OldStyleLUB), ExistentialMismatch(ref x) => return tcx.lift(x).map(ExistentialMismatch) }) } } -impl<'a, 'tcx> Lift<'tcx> for ConstEvalErr<'a> { - type Lifted = ConstEvalErr<'tcx>; - fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option { - tcx.lift(&self.error).map(|error| { - ConstEvalErr { - span: self.span, - stacktrace: self.stacktrace.clone(), - error, - } - }) - } -} - -impl<'a, 'tcx> Lift<'tcx> for interpret::EvalError<'a> { - type Lifted = interpret::EvalError<'tcx>; - fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option { - Some(interpret::EvalError { - kind: tcx.lift(&self.kind)?, - }) - } -} - -impl<'a, 'tcx, O: Lift<'tcx>> Lift<'tcx> for interpret::EvalErrorKind<'a, O> { - type Lifted = interpret::EvalErrorKind<'tcx, >::Lifted>; - fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option { - use ::mir::interpret::EvalErrorKind::*; - Some(match *self { - MachineError(ref err) => MachineError(err.clone()), - FunctionAbiMismatch(a, b) => FunctionAbiMismatch(a, b), - FunctionArgMismatch(a, b) => FunctionArgMismatch( - tcx.lift(&a)?, - tcx.lift(&b)?, - ), - FunctionRetMismatch(a, b) => FunctionRetMismatch( - tcx.lift(&a)?, - tcx.lift(&b)?, - ), - FunctionArgCountMismatch => FunctionArgCountMismatch, - NoMirFor(ref s) => NoMirFor(s.clone()), - UnterminatedCString(ptr) => UnterminatedCString(ptr), - DanglingPointerDeref => DanglingPointerDeref, - DoubleFree => DoubleFree, - InvalidMemoryAccess => InvalidMemoryAccess, - InvalidFunctionPointer => InvalidFunctionPointer, - InvalidBool => InvalidBool, - InvalidDiscriminant(val) => InvalidDiscriminant(val), - PointerOutOfBounds { - ptr, - access, - allocation_size, - } => PointerOutOfBounds { ptr, access, allocation_size }, - InvalidNullPointerUsage => InvalidNullPointerUsage, - ReadPointerAsBytes => ReadPointerAsBytes, - ReadBytesAsPointer => ReadBytesAsPointer, - ReadForeignStatic => ReadForeignStatic, - InvalidPointerMath => InvalidPointerMath, - ReadUndefBytes(offset) => ReadUndefBytes(offset), - DeadLocal => DeadLocal, - InvalidBoolOp(bop) => InvalidBoolOp(bop), - Unimplemented(ref s) => Unimplemented(s.clone()), - DerefFunctionPointer => DerefFunctionPointer, - ExecuteMemory => ExecuteMemory, - BoundsCheck { ref len, ref index } => BoundsCheck { - len: tcx.lift(len)?, - index: tcx.lift(index)?, - }, - Intrinsic(ref s) => Intrinsic(s.clone()), - InvalidChar(c) => InvalidChar(c), - StackFrameLimitReached => StackFrameLimitReached, - OutOfTls => OutOfTls, - TlsOutOfBounds => TlsOutOfBounds, - AbiViolation(ref s) => AbiViolation(s.clone()), - AlignmentCheckFailed { - required, - has, - } => AlignmentCheckFailed { required, has }, - MemoryLockViolation { - ptr, - len, - frame, - access, - ref lock, - } => MemoryLockViolation { ptr, len, frame, access, lock: lock.clone() }, - MemoryAcquireConflict { - ptr, - len, - kind, - ref lock, - } => MemoryAcquireConflict { ptr, len, kind, lock: lock.clone() }, - InvalidMemoryLockRelease { - ptr, - len, - frame, - ref lock, - } => InvalidMemoryLockRelease { ptr, len, frame, lock: lock.clone() }, - DeallocatedLockedMemory { - ptr, - ref lock, - } => DeallocatedLockedMemory { 
ptr, lock: lock.clone() }, - ValidationFailure(ref s) => ValidationFailure(s.clone()), - CalledClosureAsFunction => CalledClosureAsFunction, - VtableForArgumentlessMethod => VtableForArgumentlessMethod, - ModifiedConstantMemory => ModifiedConstantMemory, - AssumptionNotHeld => AssumptionNotHeld, - InlineAsm => InlineAsm, - TypeNotPrimitive(ty) => TypeNotPrimitive(tcx.lift(&ty)?), - ReallocatedWrongMemoryKind(ref a, ref b) => { - ReallocatedWrongMemoryKind(a.clone(), b.clone()) - }, - DeallocatedWrongMemoryKind(ref a, ref b) => { - DeallocatedWrongMemoryKind(a.clone(), b.clone()) - }, - ReallocateNonBasePtr => ReallocateNonBasePtr, - DeallocateNonBasePtr => DeallocateNonBasePtr, - IncorrectAllocationInformation(a, b, c, d) => { - IncorrectAllocationInformation(a, b, c, d) - }, - Layout(lay) => Layout(tcx.lift(&lay)?), - HeapAllocZeroBytes => HeapAllocZeroBytes, - HeapAllocNonPowerOfTwoAlignment(n) => HeapAllocNonPowerOfTwoAlignment(n), - Unreachable => Unreachable, - Panic { ref msg, ref file, line, col } => Panic { - msg: msg.clone(), - file: file.clone(), - line, col, - }, - ReadFromReturnPointer => ReadFromReturnPointer, - PathNotFound(ref v) => PathNotFound(v.clone()), - UnimplementedTraitSelection => UnimplementedTraitSelection, - TypeckError => TypeckError, - TooGeneric => TooGeneric, - CheckMatchError => CheckMatchError, - ReferencedConstant(ref err) => ReferencedConstant(tcx.lift(&**err)?.into()), - OverflowNeg => OverflowNeg, - Overflow(op) => Overflow(op), - DivisionByZero => DivisionByZero, - RemainderByZero => RemainderByZero, - GeneratorResumedAfterReturn => GeneratorResumedAfterReturn, - GeneratorResumedAfterPanic => GeneratorResumedAfterPanic, - InfiniteLoop => InfiniteLoop, - }) - } -} - -impl<'a, 'tcx> Lift<'tcx> for ty::layout::LayoutError<'a> { - type Lifted = ty::layout::LayoutError<'tcx>; - fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option { - match *self { - ty::layout::LayoutError::Unknown(ref ty) => { - tcx.lift(ty).map(ty::layout::LayoutError::Unknown) - } - ty::layout::LayoutError::SizeOverflow(ref ty) => { - tcx.lift(ty).map(ty::layout::LayoutError::SizeOverflow) - } - } - } -} - impl<'a, 'tcx> Lift<'tcx> for ty::InstanceDef<'a> { type Lifted = ty::InstanceDef<'tcx>; fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option { match *self { ty::InstanceDef::Item(def_id) => Some(ty::InstanceDef::Item(def_id)), + ty::InstanceDef::VtableShim(def_id) => + Some(ty::InstanceDef::VtableShim(def_id)), ty::InstanceDef::Intrinsic(def_id) => Some(ty::InstanceDef::Intrinsic(def_id)), ty::InstanceDef::FnPtrShim(def_id, ref ty) => @@ -805,6 +649,7 @@ impl<'tcx> TypeFoldable<'tcx> for ty::instance::Instance<'tcx> { substs: self.substs.fold_with(folder), def: match self.def { Item(did) => Item(did.fold_with(folder)), + VtableShim(did) => VtableShim(did.fold_with(folder)), Intrinsic(did) => Intrinsic(did.fold_with(folder)), FnPtrShim(did, ty) => FnPtrShim( did.fold_with(folder), @@ -833,7 +678,7 @@ impl<'tcx> TypeFoldable<'tcx> for ty::instance::Instance<'tcx> { use ty::InstanceDef::*; self.substs.visit_with(visitor) || match self.def { - Item(did) | Intrinsic(did) | Virtual(did, _) => { + Item(did) | VtableShim(did) | Intrinsic(did) | Virtual(did, _) => { did.visit_with(visitor) }, FnPtrShim(did, ty) | CloneShim(did, ty) => { @@ -890,9 +735,20 @@ impl<'tcx> TypeFoldable<'tcx> for Ty<'tcx> { ty::UnnormalizedProjection(data.fold_with(folder)) } ty::Opaque(did, substs) => ty::Opaque(did, substs.fold_with(folder)), - ty::Bool | ty::Char | ty::Str | 
ty::Int(_) | - ty::Uint(_) | ty::Float(_) | ty::Error | ty::Infer(_) | - ty::Param(..) | ty::Never | ty::Foreign(..) => return self + + ty::Bool | + ty::Char | + ty::Str | + ty::Int(_) | + ty::Uint(_) | + ty::Float(_) | + ty::Error | + ty::Infer(_) | + ty::Param(..) | + ty::Bound(..) | + ty::Placeholder(..) | + ty::Never | + ty::Foreign(..) => return self }; if self.sty == sty { @@ -927,9 +783,20 @@ impl<'tcx> TypeFoldable<'tcx> for Ty<'tcx> { data.visit_with(visitor) } ty::Opaque(_, ref substs) => substs.visit_with(visitor), - ty::Bool | ty::Char | ty::Str | ty::Int(_) | - ty::Uint(_) | ty::Float(_) | ty::Error | ty::Infer(_) | - ty::Param(..) | ty::Never | ty::Foreign(..) => false, + + ty::Bool | + ty::Char | + ty::Str | + ty::Int(_) | + ty::Uint(_) | + ty::Float(_) | + ty::Error | + ty::Infer(_) | + ty::Bound(..) | + ty::Placeholder(..) | + ty::Param(..) | + ty::Never | + ty::Foreign(..) => false, } } @@ -1158,7 +1025,6 @@ EnumTypeFoldableImpl! { (ty::error::TypeError::ProjectionBoundsLength)(x), (ty::error::TypeError::Sorts)(x), (ty::error::TypeError::ExistentialMismatch)(x), - (ty::error::TypeError::OldStyleLUB)(x), } } diff --git a/src/librustc/ty/sty.rs b/src/librustc/ty/sty.rs index 62e38ad9bf..1416cb17fe 100644 --- a/src/librustc/ty/sty.rs +++ b/src/librustc/ty/sty.rs @@ -22,6 +22,7 @@ use ty::{List, TyS, ParamEnvAnd, ParamEnv}; use util::captures::Captures; use mir::interpret::{Scalar, Pointer}; +use smallvec::SmallVec; use std::iter; use std::cmp::Ordering; use rustc_target::spec::abi; @@ -77,6 +78,17 @@ impl BoundRegion { _ => false, } } + + /// When canonicalizing, we replace unbound inference variables and free + /// regions with anonymous late bound regions. This method asserts that + /// we have an anonymous late bound region, which hence may refer to + /// a canonical variable. + pub fn assert_bound_var(&self) -> BoundVar { + match *self { + BoundRegion::BrAnon(var) => BoundVar::from_u32(var), + _ => bug!("bound region is not anonymous"), + } + } } /// N.B., If you change this, you'll probably want to change the corresponding @@ -188,6 +200,12 @@ pub enum TyKind<'tcx> { /// A type parameter; for example, `T` in `fn f(x: T) {} Param(ParamTy), + /// Bound type variable, used only when preparing a trait query. + Bound(ty::DebruijnIndex, BoundTy), + + /// A placeholder type - universally quantified higher-ranked type. + Placeholder(ty::PlaceholderType), + /// A type variable used during type checking. Infer(InferTy), @@ -196,6 +214,10 @@ pub enum TyKind<'tcx> { Error, } +// `TyKind` is used a lot. Make sure it doesn't unintentionally get bigger. +#[cfg(target_arch = "x86_64")] +static_assert!(MEM_SIZE_OF_TY_KIND: ::std::mem::size_of::>() == 24); + /// A closure can be modeled as a struct that looks like: /// /// struct Closure<'l0...'li, T0...Tj, CK, CS, U0...Uk> { @@ -613,7 +635,7 @@ impl<'tcx> Binder<&'tcx List>> { /// A complete reference to a trait. These take numerous guises in syntax, /// but perhaps the most recognizable form is in a where clause: /// -/// T : Foo +/// T: Foo /// /// This would be represented by a trait-reference where the def-id is the /// def-id for the trait `Foo` and the substs define `T` as parameter 0, @@ -623,8 +645,8 @@ impl<'tcx> Binder<&'tcx List>> { /// that case the `Self` parameter is absent from the substitutions. /// /// Note that a `TraitRef` introduces a level of region binding, to -/// account for higher-ranked trait bounds like `T : for<'a> Foo<&'a -/// U>` or higher-ranked object types. 
+/// account for higher-ranked trait bounds like `T: for<'a> Foo<&'a U>` +/// or higher-ranked object types. #[derive(Copy, Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] pub struct TraitRef<'tcx> { pub def_id: DefId, @@ -636,7 +658,7 @@ impl<'tcx> TraitRef<'tcx> { TraitRef { def_id: def_id, substs: substs } } - /// Returns a TraitRef of the form `P0: Foo` where `Pi` + /// Returns a `TraitRef` of the form `P0: Foo` where `Pi` /// are the parameters defined on trait. pub fn identity<'a, 'gcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, def_id: DefId) -> TraitRef<'tcx> { TraitRef { @@ -645,11 +667,12 @@ impl<'tcx> TraitRef<'tcx> { } } + #[inline] pub fn self_ty(&self) -> Ty<'tcx> { self.substs.type_at(0) } - pub fn input_types<'a>(&'a self) -> impl DoubleEndedIterator> + 'a { + pub fn input_types<'a>(&'a self) -> impl DoubleEndedIterator> + 'a { // Select only the "input types" from a trait-reference. For // now this is all the types that appear in the // trait-reference, but it should eventually exclude @@ -727,8 +750,8 @@ impl<'a, 'gcx, 'tcx> ExistentialTraitRef<'tcx> { /// or some placeholder type. pub fn with_self_ty(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, self_ty: Ty<'tcx>) -> ty::TraitRef<'tcx> { - // otherwise the escaping regions would be captured by the binder - // debug_assert!(!self_ty.has_escaping_regions()); + // otherwise the escaping vars would be captured by the binder + // debug_assert!(!self_ty.has_escaping_bound_vars()); ty::TraitRef { def_id: self.def_id, @@ -755,11 +778,11 @@ impl<'tcx> PolyExistentialTraitRef<'tcx> { } } -/// Binder is a binder for higher-ranked lifetimes. It is part of the +/// Binder is a binder for higher-ranked lifetimes or types. It is part of the /// compiler's representation for things like `for<'a> Fn(&'a isize)` /// (which would be represented by the type `PolyTraitRef == /// Binder`). Note that when we instantiate, -/// erase, or otherwise "discharge" these bound regions, we change the +/// erase, or otherwise "discharge" these bound vars, we change the /// type from `Binder` to just `T` (see /// e.g. `liberate_late_bound_regions`). #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)] @@ -767,29 +790,28 @@ pub struct Binder(T); impl Binder { /// Wraps `value` in a binder, asserting that `value` does not - /// contain any bound regions that would be bound by the + /// contain any bound vars that would be bound by the /// binder. This is commonly used to 'inject' a value T into a /// different binding level. pub fn dummy<'tcx>(value: T) -> Binder where T: TypeFoldable<'tcx> { - debug_assert!(!value.has_escaping_regions()); + debug_assert!(!value.has_escaping_bound_vars()); Binder(value) } - /// Wraps `value` in a binder, binding late-bound regions (if any). - pub fn bind<'tcx>(value: T) -> Binder - { + /// Wraps `value` in a binder, binding higher-ranked vars (if any). + pub fn bind<'tcx>(value: T) -> Binder { Binder(value) } /// Skips the binder and returns the "bound" value. This is a /// risky thing to do because it's easy to get confused about /// debruijn indices and the like. It is usually better to - /// discharge the binder using `no_late_bound_regions` or + /// discharge the binder using `no_bound_vars` or /// `replace_late_bound_regions` or something like /// that. 
`skip_binder` is only valid when you are either - /// extracting data that has nothing to do with bound regions, you + /// extracting data that has nothing to do with bound vars, you /// are doing some sort of test that does not involve bound /// regions, or you are being very careful about your depth /// accounting. @@ -798,7 +820,7 @@ impl Binder { /// /// - extracting the def-id from a PolyTraitRef; /// - comparing the self type of a PolyTraitRef to see if it is equal to - /// a type parameter `X`, since the type `X` does not reference any regions + /// a type parameter `X`, since the type `X` does not reference any regions pub fn skip_binder(&self) -> &T { &self.0 } @@ -820,19 +842,19 @@ impl Binder { } /// Unwraps and returns the value within, but only if it contains - /// no bound regions at all. (In other words, if this binder -- + /// no bound vars at all. (In other words, if this binder -- /// and indeed any enclosing binder -- doesn't bind anything at /// all.) Otherwise, returns `None`. /// /// (One could imagine having a method that just unwraps a single - /// binder, but permits late-bound regions bound by enclosing + /// binder, but permits late-bound vars bound by enclosing /// binders, but that would require adjusting the debruijn /// indices, and given the shallow binding structure we often use, /// would not be that useful.) - pub fn no_late_bound_regions<'tcx>(self) -> Option - where T : TypeFoldable<'tcx> + pub fn no_bound_vars<'tcx>(self) -> Option + where T: TypeFoldable<'tcx> { - if self.skip_binder().has_escaping_regions() { + if self.skip_binder().has_escaping_bound_vars() { None } else { Some(self.skip_binder().clone()) @@ -873,16 +895,16 @@ pub struct ProjectionTy<'tcx> { /// The parameters of the associated item. pub substs: &'tcx Substs<'tcx>, - /// The DefId of the TraitItem for the associated type N. + /// The `DefId` of the `TraitItem` for the associated type `N`. /// - /// Note that this is not the DefId of the TraitRef containing this - /// associated type, which is in tcx.associated_item(item_def_id).container. + /// Note that this is not the `DefId` of the `TraitRef` containing this + /// associated type, which is in `tcx.associated_item(item_def_id).container`. pub item_def_id: DefId, } impl<'a, 'tcx> ProjectionTy<'tcx> { - /// Construct a ProjectionTy by searching the trait from trait_ref for the - /// associated item named item_name. + /// Construct a `ProjectionTy` by searching the trait from `trait_ref` for the + /// associated item named `item_name`. 
pub fn from_ref_and_name( tcx: TyCtxt<'_, '_, '_>, trait_ref: ty::TraitRef<'tcx>, item_name: Ident ) -> ProjectionTy<'tcx> { @@ -957,15 +979,18 @@ impl<'tcx> FnSig<'tcx> { pub type PolyFnSig<'tcx> = Binder>; impl<'tcx> PolyFnSig<'tcx> { + #[inline] pub fn inputs(&self) -> Binder<&'tcx [Ty<'tcx>]> { self.map_bound_ref(|fn_sig| fn_sig.inputs()) } + #[inline] pub fn input(&self, index: usize) -> ty::Binder> { self.map_bound_ref(|fn_sig| fn_sig.inputs()[index]) } pub fn inputs_and_output(&self) -> ty::Binder<&'tcx List>> { self.map_bound_ref(|fn_sig| fn_sig.inputs_and_output) } + #[inline] pub fn output(&self) -> ty::Binder> { self.map_bound_ref(|fn_sig| fn_sig.output()) } @@ -1117,7 +1142,7 @@ pub type Region<'tcx> = &'tcx RegionKind; /// /// [1]: http://smallcultfollowing.com/babysteps/blog/2013/10/29/intermingled-parameter-lists/ /// [2]: http://smallcultfollowing.com/babysteps/blog/2013/11/04/intermingled-parameter-lists/ -/// [rustc guide]: https://rust-lang-nursery.github.io/rustc-guide/traits/hrtb.html +/// [rustc guide]: https://rust-lang.github.io/rustc-guide/traits/hrtb.html #[derive(Clone, PartialEq, Eq, Hash, Copy, RustcEncodable, RustcDecodable, PartialOrd, Ord)] pub enum RegionKind { // Region bound in a type or fn declaration which will be @@ -1147,7 +1172,7 @@ pub enum RegionKind { /// A placeholder region - basically the higher-ranked version of ReFree. /// Should not exist after typeck. - RePlaceholder(ty::Placeholder), + RePlaceholder(ty::PlaceholderRegion), /// Empty lifetime is for data that is never accessed. /// Bottom in the region lattice. We treat ReEmpty somewhat @@ -1166,9 +1191,6 @@ pub enum RegionKind { /// `ClosureRegionRequirements` that are produced by MIR borrowck. /// See `ClosureRegionRequirements` for more details. ReClosureBound(RegionVid), - - /// Canonicalized region, used only when preparing a trait query. - ReCanonical(BoundTyIndex), } impl<'tcx> serialize::UseSpecializedDecodable for Region<'tcx> {} @@ -1219,22 +1241,35 @@ pub enum InferTy { FreshTy(u32), FreshIntTy(u32), FreshFloatTy(u32), - - /// Bound type variable, used only when preparing a trait query. - BoundTy(BoundTy), } newtype_index! { - pub struct BoundTyIndex { .. } + pub struct BoundVar { .. } } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)] pub struct BoundTy { - pub level: DebruijnIndex, - pub var: BoundTyIndex, + pub var: BoundVar, + pub kind: BoundTyKind, } -impl_stable_hash_for!(struct BoundTy { level, var }); +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)] +pub enum BoundTyKind { + Anon, + Param(InternedString), +} + +impl_stable_hash_for!(struct BoundTy { var, kind }); +impl_stable_hash_for!(enum self::BoundTyKind { Anon, Param(a) }); + +impl From for BoundTy { + fn from(var: BoundVar) -> Self { + BoundTy { + var, + kind: BoundTyKind::Anon, + } + } +} /// A `ProjectionPredicate` for an `ExistentialTraitRef`. #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)] @@ -1264,7 +1299,7 @@ impl<'a, 'tcx, 'gcx> ExistentialProjection<'tcx> { -> ty::ProjectionPredicate<'tcx> { // otherwise the escaping regions would be captured by the binders - debug_assert!(!self_ty.has_escaping_regions()); + debug_assert!(!self_ty.has_escaping_bound_vars()); ty::ProjectionPredicate { projection_ty: ty::ProjectionTy { @@ -1363,7 +1398,6 @@ impl RegionKind { RegionKind::ReEmpty => false, RegionKind::ReErased => false, RegionKind::ReClosureBound(..) 
=> false, - RegionKind::ReCanonical(..) => false, } } @@ -1433,7 +1467,7 @@ impl RegionKind { } ty::RePlaceholder(..) => { flags = flags | TypeFlags::HAS_FREE_REGIONS; - flags = flags | TypeFlags::HAS_RE_SKOL; + flags = flags | TypeFlags::HAS_RE_PLACEHOLDER; } ty::ReLateBound(..) => { flags = flags | TypeFlags::HAS_RE_LATE_BOUND; @@ -1450,10 +1484,6 @@ impl RegionKind { } ty::ReErased => { } - ty::ReCanonical(..) => { - flags = flags | TypeFlags::HAS_FREE_REGIONS; - flags = flags | TypeFlags::HAS_CANONICAL_VARS; - } ty::ReClosureBound(..) => { flags = flags | TypeFlags::HAS_FREE_REGIONS; } @@ -1522,6 +1552,7 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { } } + #[inline] pub fn is_ty_var(&self) -> bool { match self.sty { Infer(TyVar(_)) => true, @@ -1706,6 +1737,7 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { } } + #[inline] pub fn is_integral(&self) -> bool { match self.sty { Infer(IntVar(_)) | Int(_) | Uint(_) => true, @@ -1736,6 +1768,7 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { } } + #[inline] pub fn is_fp(&self) -> bool { match self.sty { Infer(FloatVar(_)) | Float(_) => true, @@ -1819,6 +1852,7 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { } } + #[inline] pub fn ty_adt_def(&self) -> Option<&'tcx AdtDef> { match self.sty { Adt(adt, _) => Some(adt), @@ -1826,28 +1860,27 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { } } - /// Returns the regions directly referenced from this type (but - /// not types reachable from this type via `walk_tys`). This - /// ignores late-bound regions binders. - pub fn regions(&self) -> Vec> { + /// Push onto `out` the regions directly referenced from this type (but not + /// types reachable from this type via `walk_tys`). This ignores late-bound + /// regions binders. + pub fn push_regions(&self, out: &mut SmallVec<[ty::Region<'tcx>; 4]>) { match self.sty { Ref(region, _, _) => { - vec![region] + out.push(region); } Dynamic(ref obj, region) => { - let mut v = vec![region]; - v.extend(obj.principal().skip_binder().substs.regions()); - v + out.push(region); + out.extend(obj.principal().skip_binder().substs.regions()); } Adt(_, substs) | Opaque(_, substs) => { - substs.regions().collect() + out.extend(substs.regions()) } Closure(_, ClosureSubsts { ref substs }) | Generator(_, GeneratorSubsts { ref substs }, _) => { - substs.regions().collect() + out.extend(substs.regions()) } Projection(ref data) | UnnormalizedProjection(ref data) => { - data.substs.regions().collect() + out.extend(data.substs.regions()) } FnDef(..) | FnPtr(_) | @@ -1865,10 +1898,10 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { Tuple(..) | Foreign(..) | Param(_) | + Bound(..) | + Placeholder(..) | Infer(_) | - Error => { - vec![] - } + Error => {} } } @@ -1930,7 +1963,8 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { ty::Infer(ty::TyVar(_)) => false, - ty::Infer(ty::BoundTy(_)) | + ty::Bound(..) | + ty::Placeholder(..) 
| ty::Infer(ty::FreshTy(_)) | ty::Infer(ty::FreshIntTy(_)) | ty::Infer(ty::FreshFloatTy(_)) => diff --git a/src/librustc/ty/subst.rs b/src/librustc/ty/subst.rs index 02b5d36ecc..3425203989 100644 --- a/src/librustc/ty/subst.rs +++ b/src/librustc/ty/subst.rs @@ -12,7 +12,7 @@ use hir::def_id::DefId; use infer::canonical::Canonical; -use ty::{self, BoundTyIndex, Lift, List, Ty, TyCtxt}; +use ty::{self, BoundVar, Lift, List, Ty, TyCtxt}; use ty::fold::{TypeFoldable, TypeFolder, TypeVisitor}; use serialize::{self, Encodable, Encoder, Decodable, Decoder}; @@ -27,7 +27,7 @@ use std::marker::PhantomData; use std::mem; use std::num::NonZeroUsize; -/// An entity in the Rust typesystem, which can be one of +/// An entity in the Rust type system, which can be one of /// several kinds (only types and lifetimes for now). /// To reduce memory usage, a `Kind` is a interned pointer, /// with the lowest 2 bits being reserved for a tag to @@ -171,7 +171,7 @@ impl<'tcx> Decodable for Kind<'tcx> { pub type Substs<'tcx> = List>; impl<'a, 'gcx, 'tcx> Substs<'tcx> { - /// Creates a Substs that maps each generic parameter to itself. + /// Creates a `Substs` that maps each generic parameter to itself. pub fn identity_for_item(tcx: TyCtxt<'a, 'gcx, 'tcx>, def_id: DefId) -> &'tcx Substs<'tcx> { Substs::for_item(tcx, def_id, |param, _| { @@ -179,9 +179,38 @@ impl<'a, 'gcx, 'tcx> Substs<'tcx> { }) } - /// Creates a Substs for generic parameter definitions, + /// Creates a `Substs` that maps each generic parameter to a higher-ranked + /// var bound at index `0`. For types, we use a `BoundVar` index equal to + /// the type parameter index. For regions, we use the `BoundRegion::BrNamed` + /// variant (which has a def-id). + pub fn bound_vars_for_item( + tcx: TyCtxt<'a, 'gcx, 'tcx>, + def_id: DefId + ) -> &'tcx Substs<'tcx> { + Substs::for_item(tcx, def_id, |param, _| { + match param.kind { + ty::GenericParamDefKind::Type { .. } => { + tcx.mk_ty( + ty::Bound(ty::INNERMOST, ty::BoundTy { + var: ty::BoundVar::from(param.index), + kind: ty::BoundTyKind::Param(param.name), + }) + ).into() + } + + ty::GenericParamDefKind::Lifetime => { + tcx.mk_region(ty::RegionKind::ReLateBound( + ty::INNERMOST, + ty::BoundRegion::BrNamed(param.def_id, param.name) + )).into() + } + } + }) + } + + /// Creates a `Substs` for generic parameter definitions, /// by calling closures to obtain each kind. - /// The closures get to observe the Substs as they're + /// The closures get to observe the `Substs` as they're /// being built, which can be used to correctly /// substitute defaults of generic parameters. pub fn for_item(tcx: TyCtxt<'a, 'gcx, 'tcx>, @@ -242,7 +271,7 @@ impl<'a, 'gcx, 'tcx> Substs<'tcx> { } #[inline] - pub fn types(&'a self) -> impl DoubleEndedIterator> + 'a { + pub fn types(&'a self) -> impl DoubleEndedIterator> + 'a { self.iter().filter_map(|k| { if let UnpackedKind::Type(ty) = k.unpack() { Some(ty) @@ -253,7 +282,7 @@ impl<'a, 'gcx, 'tcx> Substs<'tcx> { } #[inline] - pub fn regions(&'a self) -> impl DoubleEndedIterator> + 'a { + pub fn regions(&'a self) -> impl DoubleEndedIterator> + 'a { self.iter().filter_map(|k| { if let UnpackedKind::Lifetime(lt) = k.unpack() { Some(lt) @@ -332,7 +361,7 @@ impl<'tcx> serialize::UseSpecializedDecodable for &'tcx Substs<'tcx> {} // `foo`. Or use `foo.subst_spanned(tcx, substs, Some(span))` when // there is more information available (for better errors). 
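The comment above describes calling `foo.subst(tcx, substs)` to apply a substitution, and the `Subst` trait touched just below is what provides that entry point. As a rough, stand-alone sketch of the underlying idea only (the `ToyTy` type here is invented for illustration and is not rustc's `Ty` or `Substs`): each parameter reference is replaced by the corresponding entry of the substitution list while the rest of the structure is walked unchanged.

```
#[derive(Clone, Debug, PartialEq)]
enum ToyTy {
    Param(usize),     // the i-th generic parameter, like `ty::Param`
    Ref(Box<ToyTy>),  // `&T`, just to have some structure to recurse into
    Unit,             // `()`
}

// Replace every `Param(i)` with `substs[i]`, walking the structure recursively.
fn subst(ty: ToyTy, substs: &[ToyTy]) -> ToyTy {
    match ty {
        ToyTy::Param(i) => substs[i].clone(),
        ToyTy::Ref(inner) => ToyTy::Ref(Box::new(subst(*inner, substs))),
        ToyTy::Unit => ToyTy::Unit,
    }
}

fn main() {
    // Substituting `T := ()` into `&T` yields `&()`.
    let ty = ToyTy::Ref(Box::new(ToyTy::Param(0)));
    assert_eq!(subst(ty, &[ToyTy::Unit]), ToyTy::Ref(Box::new(ToyTy::Unit)));
}
```

The real implementation does the same walk with a `TypeFolder` (`SubstFolder` in this file), plus the extra index bookkeeping discussed in the next hunk.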
-pub trait Subst<'tcx> : Sized { +pub trait Subst<'tcx>: Sized { fn subst<'a, 'gcx>(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, substs: &[Kind<'tcx>]) -> Self { self.subst_spanned(tcx, substs, None) @@ -355,7 +384,7 @@ impl<'tcx, T:TypeFoldable<'tcx>> Subst<'tcx> for T { span, root_ty: None, ty_stack_depth: 0, - region_binders_passed: 0 }; + binders_passed: 0 }; (*self).fold_with(&mut folder) } } @@ -377,16 +406,16 @@ struct SubstFolder<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { ty_stack_depth: usize, // Number of region binders we have passed through while doing the substitution - region_binders_passed: u32, + binders_passed: u32, } impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for SubstFolder<'a, 'gcx, 'tcx> { fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'tcx> { self.tcx } fn fold_binder>(&mut self, t: &ty::Binder) -> ty::Binder { - self.region_binders_passed += 1; + self.binders_passed += 1; let t = t.super_fold_with(self); - self.region_binders_passed -= 1; + self.binders_passed -= 1; t } @@ -471,12 +500,12 @@ impl<'a, 'gcx, 'tcx> SubstFolder<'a, 'gcx, 'tcx> { } }; - self.shift_regions_through_binders(ty) + self.shift_vars_through_binders(ty) } /// It is sometimes necessary to adjust the debruijn indices during substitution. This occurs - /// when we are substituting a type with escaping regions into a context where we have passed - /// through region binders. That's quite a mouthful. Let's see an example: + /// when we are substituting a type with escaping bound vars into a context where we have + /// passed through binders. That's quite a mouthful. Let's see an example: /// /// ``` /// type Func = fn(A); @@ -516,25 +545,25 @@ impl<'a, 'gcx, 'tcx> SubstFolder<'a, 'gcx, 'tcx> { /// As indicated in the diagram, here the same type `&'a int` is substituted once, but in the /// first case we do not increase the Debruijn index and in the second case we do. The reason /// is that only in the second case have we passed through a fn binder. 
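The doc comment above explains when De Bruijn indices must be adjusted: only variables that escape the value being substituted, and only when the substitution has passed through additional binders. A toy sketch of that rule, with a made-up `ToyTy` rather than rustc's real `Ty` or `SubstFolder`, is below; the renamed `shift_vars_through_binders` that follows delegates the same behavior to `ty::fold::shift_vars` for real types.

```
#[derive(Clone, Debug, PartialEq)]
enum ToyTy {
    Bound(u32),      // De Bruijn index: which enclosing binder it refers to
    Fn(Box<ToyTy>),  // `fn(T)`: introduces one binder level
}

// Bump every variable that escapes the current value by `binders_passed`,
// leaving variables bound inside the value itself (index < depth) untouched.
fn shift_vars(ty: ToyTy, binders_passed: u32, depth: u32) -> ToyTy {
    match ty {
        ToyTy::Bound(i) if i >= depth => ToyTy::Bound(i + binders_passed),
        ToyTy::Bound(i) => ToyTy::Bound(i),
        ToyTy::Fn(inner) => {
            ToyTy::Fn(Box::new(shift_vars(*inner, binders_passed, depth + 1)))
        }
    }
}

fn main() {
    // An escaping variable pushed under one extra `fn` binder is shifted...
    assert_eq!(shift_vars(ToyTy::Bound(0), 1, 0), ToyTy::Bound(1));
    // ...but a variable bound inside the value itself is left alone.
    let closed = ToyTy::Fn(Box::new(ToyTy::Bound(0)));
    assert_eq!(shift_vars(closed.clone(), 1, 0), closed);
}
```

This mirrors the early return in the real code: when no binders have been passed, or the type has no escaping bound vars, nothing needs shifting.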
- fn shift_regions_through_binders(&self, ty: Ty<'tcx>) -> Ty<'tcx> { - debug!("shift_regions(ty={:?}, region_binders_passed={:?}, has_escaping_regions={:?})", - ty, self.region_binders_passed, ty.has_escaping_regions()); + fn shift_vars_through_binders(&self, ty: Ty<'tcx>) -> Ty<'tcx> { + debug!("shift_vars(ty={:?}, binders_passed={:?}, has_escaping_bound_vars={:?})", + ty, self.binders_passed, ty.has_escaping_bound_vars()); - if self.region_binders_passed == 0 || !ty.has_escaping_regions() { + if self.binders_passed == 0 || !ty.has_escaping_bound_vars() { return ty; } - let result = ty::fold::shift_regions(self.tcx(), self.region_binders_passed, &ty); - debug!("shift_regions: shifted result = {:?}", result); + let result = ty::fold::shift_vars(self.tcx(), &ty, self.binders_passed); + debug!("shift_vars: shifted result = {:?}", result); result } fn shift_region_through_binders(&self, region: ty::Region<'tcx>) -> ty::Region<'tcx> { - if self.region_binders_passed == 0 || !region.has_escaping_regions() { + if self.binders_passed == 0 || !region.has_escaping_bound_vars() { return region; } - self.tcx().mk_region(ty::fold::shift_region(*region, self.region_binders_passed)) + ty::fold::shift_region(self.tcx, region, self.binders_passed) } } @@ -553,15 +582,23 @@ impl CanonicalUserSubsts<'tcx> { return false; } - self.value.substs.iter().zip(BoundTyIndex::new(0)..).all(|(kind, cvar)| { + self.value.substs.iter().zip(BoundVar::new(0)..).all(|(kind, cvar)| { match kind.unpack() { UnpackedKind::Type(ty) => match ty.sty { - ty::Infer(ty::BoundTy(ref b)) => cvar == b.var, + ty::Bound(debruijn, b) => { + // We only allow a `ty::INNERMOST` index in substitutions. + assert_eq!(debruijn, ty::INNERMOST); + cvar == b.var + } _ => false, }, UnpackedKind::Lifetime(r) => match r { - ty::ReCanonical(cvar1) => cvar == *cvar1, + ty::ReLateBound(debruijn, br) => { + // We only allow a `ty::INNERMOST` index in substitutions. + assert_eq!(*debruijn, ty::INNERMOST); + cvar == br.assert_bound_var() + } _ => false, }, } diff --git a/src/librustc/ty/util.rs b/src/librustc/ty/util.rs index 00a1bfaacd..51b197d7b9 100644 --- a/src/librustc/ty/util.rs +++ b/src/librustc/ty/util.rs @@ -43,7 +43,7 @@ impl<'tcx> fmt::Display for Discr<'tcx> { match self.ty.sty { ty::Int(ity) => { let bits = ty::tls::with(|tcx| { - Integer::from_attr(tcx, SignedInt(ity)).size().bits() + Integer::from_attr(&tcx, SignedInt(ity)).size().bits() }); let x = self.val as i128; // sign extend the raw representation to be an i128 @@ -62,8 +62,8 @@ impl<'tcx> Discr<'tcx> { } pub fn checked_add<'a, 'gcx>(self, tcx: TyCtxt<'a, 'gcx, 'tcx>, n: u128) -> (Self, bool) { let (int, signed) = match self.ty.sty { - Int(ity) => (Integer::from_attr(tcx, SignedInt(ity)), true), - Uint(uty) => (Integer::from_attr(tcx, UnsignedInt(uty)), false), + Int(ity) => (Integer::from_attr(&tcx, SignedInt(ity)), true), + Uint(uty) => (Integer::from_attr(&tcx, UnsignedInt(uty)), false), _ => bug!("non integer discriminant"), }; @@ -303,7 +303,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { /// Same as applying struct_tail on `source` and `target`, but only /// keeps going as long as the two types are instances of the same /// structure definitions. - /// For `(Foo>, Foo)`, the result will be `(Foo, Trait)`, + /// For `(Foo>, Foo)`, the result will be `(Foo, Trait)`, /// whereas struct_tail produces `T`, and `Trait`, respectively. 
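The `struct_lockstep_tails` documentation just above describes peeling matching struct layers off both types until they diverge: for `(Foo<Foo<T>>, Foo<dyn Trait>)` the result is `(Foo<T>, dyn Trait)`. A toy sketch of that traversal, using an invented `ToyTy` that stands in for rustc's `Ty` (assumptions: single-field wrappers only, matched by name):

```
#[derive(Clone, Debug, PartialEq)]
enum ToyTy {
    Adt(&'static str, Box<ToyTy>),  // a single-field struct such as `Foo<..>`
    Param(&'static str),            // a type parameter `T`
    Dyn(&'static str),              // `dyn Trait`
}

// Peel matching struct layers off both sides in lockstep until they diverge.
fn lockstep_tails(mut a: ToyTy, mut b: ToyTy) -> (ToyTy, ToyTy) {
    loop {
        match (a, b) {
            (ToyTy::Adt(n1, f1), ToyTy::Adt(n2, f2)) if n1 == n2 => {
                a = *f1;
                b = *f2;
            }
            (x, y) => return (x, y),
        }
    }
}

fn main() {
    // (Foo<Foo<T>>, Foo<dyn Trait>)  ->  (Foo<T>, dyn Trait)
    let source = ToyTy::Adt("Foo", Box::new(ToyTy::Adt("Foo", Box::new(ToyTy::Param("T")))));
    let target = ToyTy::Adt("Foo", Box::new(ToyTy::Dyn("Trait")));
    let (s, t) = lockstep_tails(source, target);
    assert_eq!(s, ToyTy::Adt("Foo", Box::new(ToyTy::Param("T"))));
    assert_eq!(t, ToyTy::Dyn("Trait"));
}
```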
pub fn struct_lockstep_tails(self, source: Ty<'tcx>, @@ -363,7 +363,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { erased_self_ty, predicates); - assert!(!erased_self_ty.has_escaping_regions()); + assert!(!erased_self_ty.has_escaping_bound_vars()); traits::elaborate_predicates(self, predicates) .filter_map(|predicate| { @@ -389,7 +389,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { // construct such an object, but this seems // correct even if that code changes). let ty::OutlivesPredicate(ref t, ref r) = predicate.skip_binder(); - if t == &erased_self_ty && !r.has_escaping_regions() { + if t == &erased_self_ty && !r.has_escaping_bound_vars() { Some(*r) } else { None @@ -527,7 +527,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { self.def_key(def_id).disambiguated_data.data == DefPathData::ClosureExpr } - /// True if `def_id` refers to a trait (e.g., `trait Foo { ... }`). + /// True if `def_id` refers to a trait (i.e., `trait Foo { ... }`). pub fn is_trait(self, def_id: DefId) -> bool { if let DefPathData::Trait(_) = self.def_key(def_id).disambiguated_data.data { true @@ -951,8 +951,8 @@ fn needs_drop_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, // Can refer to a type which may drop. // FIXME(eddyb) check this against a ParamEnv. - ty::Dynamic(..) | ty::Projection(..) | ty::Param(_) | - ty::Opaque(..) | ty::Infer(_) | ty::Error => true, + ty::Dynamic(..) | ty::Projection(..) | ty::Param(_) | ty::Bound(..) | + ty::Placeholder(..) | ty::Opaque(..) | ty::Infer(_) | ty::Error => true, ty::UnnormalizedProjection(..) => bug!("only used with chalk-engine"), diff --git a/src/librustc/ty/walk.rs b/src/librustc/ty/walk.rs index 47fbfba877..82b95b9df6 100644 --- a/src/librustc/ty/walk.rs +++ b/src/librustc/ty/walk.rs @@ -82,7 +82,7 @@ fn push_subtypes<'tcx>(stack: &mut TypeWalkerStack<'tcx>, parent_ty: Ty<'tcx>) { match parent_ty.sty { ty::Bool | ty::Char | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Str | ty::Infer(_) | ty::Param(_) | ty::Never | ty::Error | - ty::Foreign(..) => { + ty::Placeholder(..) | ty::Bound(..) | ty::Foreign(..) 
=> { } ty::Array(ty, len) => { push_const(stack, len); diff --git a/src/librustc/ty/wf.rs b/src/librustc/ty/wf.rs index 27747970f7..6ae0793d92 100644 --- a/src/librustc/ty/wf.rs +++ b/src/librustc/ty/wf.rs @@ -158,7 +158,7 @@ impl<'a, 'gcx, 'tcx> WfPredicates<'a, 'gcx, 'tcx> { let infcx = &mut self.infcx; let param_env = self.param_env; self.out.iter() - .inspect(|pred| assert!(!pred.has_escaping_regions())) + .inspect(|pred| assert!(!pred.has_escaping_bound_vars())) .flat_map(|pred| { let mut selcx = traits::SelectionContext::new(infcx); let pred = traits::normalize(&mut selcx, param_env, cause.clone(), pred); @@ -190,7 +190,7 @@ impl<'a, 'gcx, 'tcx> WfPredicates<'a, 'gcx, 'tcx> { self.out.extend( trait_ref.substs.types() - .filter(|ty| !ty.has_escaping_regions()) + .filter(|ty| !ty.has_escaping_bound_vars()) .map(|ty| traits::Obligation::new(cause.clone(), param_env, ty::Predicate::WellFormed(ty)))); @@ -205,7 +205,7 @@ impl<'a, 'gcx, 'tcx> WfPredicates<'a, 'gcx, 'tcx> { let trait_ref = data.trait_ref(self.infcx.tcx); self.compute_trait_ref(&trait_ref, Elaborate::None); - if !data.has_escaping_regions() { + if !data.has_escaping_bound_vars() { let predicate = trait_ref.to_predicate(); let cause = self.cause(traits::ProjectionWf(data)); self.out.push(traits::Obligation::new(cause, self.param_env, predicate)); @@ -229,7 +229,7 @@ impl<'a, 'gcx, 'tcx> WfPredicates<'a, 'gcx, 'tcx> { } fn require_sized(&mut self, subty: Ty<'tcx>, cause: traits::ObligationCauseCode<'tcx>) { - if !subty.has_escaping_regions() { + if !subty.has_escaping_bound_vars() { let cause = self.cause(cause); let trait_ref = ty::TraitRef { def_id: self.infcx.tcx.require_lang_item(lang_items::SizedTraitLangItem), @@ -258,6 +258,8 @@ impl<'a, 'gcx, 'tcx> WfPredicates<'a, 'gcx, 'tcx> { ty::GeneratorWitness(..) | ty::Never | ty::Param(_) | + ty::Bound(..) | + ty::Placeholder(..) | ty::Foreign(..) => { // WfScalar, WfParameter, etc } @@ -299,7 +301,7 @@ impl<'a, 'gcx, 'tcx> WfPredicates<'a, 'gcx, 'tcx> { ty::Ref(r, rty, _) => { // WfReference - if !r.has_escaping_regions() && !rty.has_escaping_regions() { + if !r.has_escaping_bound_vars() && !rty.has_escaping_bound_vars() { let cause = self.cause(traits::ReferenceOutlivesReferent(ty)); self.out.push( traits::Obligation::new( @@ -450,7 +452,7 @@ impl<'a, 'gcx, 'tcx> WfPredicates<'a, 'gcx, 'tcx> { .map(|pred| traits::Obligation::new(cause.clone(), self.param_env, pred)) - .filter(|pred| !pred.has_escaping_regions()) + .filter(|pred| !pred.has_escaping_bound_vars()) .collect() } @@ -489,7 +491,7 @@ impl<'a, 'gcx, 'tcx> WfPredicates<'a, 'gcx, 'tcx> { // Note: in fact we only permit builtin traits, not `Bar<'d>`, I // am looking forward to the future here. - if !data.has_escaping_regions() { + if !data.has_escaping_bound_vars() { let implicit_bounds = object_region_bounds(self.infcx.tcx, data); diff --git a/src/librustc/util/common.rs b/src/librustc/util/common.rs index bcc0b8047e..7a246af82e 100644 --- a/src/librustc/util/common.rs +++ b/src/librustc/util/common.rs @@ -24,7 +24,6 @@ use std::sync::mpsc::{Sender}; use syntax_pos::{SpanData}; use ty::TyCtxt; use dep_graph::{DepNode}; -use proc_macro; use lazy_static; use session::Session; @@ -47,14 +46,13 @@ lazy_static! 
{ } fn panic_hook(info: &panic::PanicInfo<'_>) { - if !proc_macro::__internal::in_sess() { - (*DEFAULT_HOOK)(info); + (*DEFAULT_HOOK)(info); - let backtrace = env::var_os("RUST_BACKTRACE").map(|x| &x != "0").unwrap_or(false); + let backtrace = env::var_os("RUST_BACKTRACE").map(|x| &x != "0").unwrap_or(false); - if backtrace { - TyCtxt::try_print_query_stack(); - } + if backtrace { + TyCtxt::try_print_query_stack(); + } #[cfg(windows)] unsafe { @@ -66,7 +64,6 @@ fn panic_hook(info: &panic::PanicInfo<'_>) { DebugBreak(); } } - } } pub fn install_panic_hook() { diff --git a/src/librustc/util/nodemap.rs b/src/librustc/util/nodemap.rs index dbd3e00d9f..6adfe2cde6 100644 --- a/src/librustc/util/nodemap.rs +++ b/src/librustc/util/nodemap.rs @@ -10,8 +10,6 @@ //! An efficient hash map for node IDs -#![allow(non_snake_case)] - use hir::def_id::DefId; use hir::{HirId, ItemLocalId}; use syntax::ast; @@ -22,9 +20,7 @@ pub use rustc_data_structures::fx::FxHashSet; macro_rules! define_id_collections { ($map_name:ident, $set_name:ident, $key:ty) => { pub type $map_name = FxHashMap<$key, T>; - pub fn $map_name() -> $map_name { Default::default() } pub type $set_name = FxHashSet<$key>; - pub fn $set_name() -> $set_name { Default::default() } } } diff --git a/src/librustc/util/ppaux.rs b/src/librustc/util/ppaux.rs index 709b844526..eea3b54919 100644 --- a/src/librustc/util/ppaux.rs +++ b/src/librustc/util/ppaux.rs @@ -16,9 +16,9 @@ use ty::subst::{self, Subst}; use ty::{BrAnon, BrEnv, BrFresh, BrNamed}; use ty::{Bool, Char, Adt}; use ty::{Error, Str, Array, Slice, Float, FnDef, FnPtr}; -use ty::{Param, RawPtr, Ref, Never, Tuple}; +use ty::{Param, Bound, RawPtr, Ref, Never, Tuple}; use ty::{Closure, Generator, GeneratorWitness, Foreign, Projection, Opaque}; -use ty::{UnnormalizedProjection, Dynamic, Int, Uint, Infer}; +use ty::{Placeholder, UnnormalizedProjection, Dynamic, Int, Uint, Infer}; use ty::{self, RegionVid, Ty, TyCtxt, TypeFoldable, GenericParamCount, GenericParamDefKind}; use util::nodemap::FxHashSet; @@ -251,25 +251,17 @@ impl PrintContext { fn parameterized(&mut self, f: &mut F, substs: &subst::Substs<'_>, - mut did: DefId, + did: DefId, projections: &[ty::ProjectionPredicate<'_>]) -> fmt::Result { let key = ty::tls::with(|tcx| tcx.def_key(did)); - let mut item_name = if let Some(name) = key.disambiguated_data.data.get_opt_name() { - Some(name) - } else { - did.index = key.parent.unwrap_or_else( - || bug!("finding type for {:?}, encountered def-id {:?} with no parent", - did, did)); - self.parameterized(f, substs, did, projections)?; - return write!(f, "::{}", key.disambiguated_data.data.as_interned_str()); - }; let verbose = self.is_verbose; let mut num_supplied_defaults = 0; let mut has_self = false; let mut own_counts: GenericParamCount = Default::default(); let mut is_value_path = false; + let mut item_name = Some(key.disambiguated_data.data.as_interned_str()); let fn_trait_kind = ty::tls::with(|tcx| { // Unfortunately, some kinds of items (e.g., closures) don't have // generics. 
So walk back up the find the closest parent that DOES @@ -282,6 +274,7 @@ impl PrintContext { DefPathData::AssocTypeInImpl(_) | DefPathData::AssocExistentialInImpl(_) | DefPathData::Trait(_) | + DefPathData::Impl | DefPathData::TypeNs(_) => { break; } @@ -292,7 +285,6 @@ impl PrintContext { } DefPathData::CrateRoot | DefPathData::Misc | - DefPathData::Impl | DefPathData::Module(_) | DefPathData::MacroDef(_) | DefPathData::ClosureExpr | @@ -686,8 +678,8 @@ impl<'tcx> fmt::Debug for ty::ClosureUpvar<'tcx> { impl fmt::Debug for ty::UpvarId { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "UpvarId({:?};`{}`;{:?})", - self.var_id, - ty::tls::with(|tcx| tcx.hir.name(tcx.hir.hir_to_node_id(self.var_id))), + self.var_path.hir_id, + ty::tls::with(|tcx| tcx.hir.name(tcx.hir.hir_to_node_id(self.var_path.hir_id))), self.closure_expr_id) } } @@ -798,12 +790,9 @@ define_print! { ty::ReEarlyBound(ref data) => { write!(f, "{}", data.name) } - ty::ReCanonical(_) => { - write!(f, "'_") - } ty::ReLateBound(_, br) | ty::ReFree(ty::FreeRegion { bound_region: br, .. }) | - ty::RePlaceholder(ty::Placeholder { name: br, .. }) => { + ty::RePlaceholder(ty::PlaceholderRegion { name: br, .. }) => { write!(f, "{}", br) } ty::ReScope(scope) if cx.identify_regions => { @@ -868,10 +857,6 @@ define_print! { write!(f, "{:?}", vid) } - ty::ReCanonical(c) => { - write!(f, "'?{}", c.index()) - } - ty::RePlaceholder(placeholder) => { write!(f, "RePlaceholder({:?})", placeholder) } @@ -984,7 +969,6 @@ define_print! { ty::TyVar(_) => write!(f, "_"), ty::IntVar(_) => write!(f, "{}", "{integer}"), ty::FloatVar(_) => write!(f, "{}", "{float}"), - ty::BoundTy(_) => write!(f, "_"), ty::FreshTy(v) => write!(f, "FreshTy({})", v), ty::FreshIntTy(v) => write!(f, "FreshIntTy({})", v), ty::FreshFloatTy(v) => write!(f, "FreshFloatTy({})", v) @@ -996,7 +980,6 @@ define_print! { ty::TyVar(ref v) => write!(f, "{:?}", v), ty::IntVar(ref v) => write!(f, "{:?}", v), ty::FloatVar(ref v) => write!(f, "{:?}", v), - ty::BoundTy(v) => write!(f, "?{:?}", v.var.index()), ty::FreshTy(v) => write!(f, "FreshTy({:?})", v), ty::FreshIntTy(v) => write!(f, "FreshIntTy({:?})", v), ty::FreshFloatTy(v) => write!(f, "FreshFloatTy({:?})", v) @@ -1127,6 +1110,19 @@ define_print! { Infer(infer_ty) => write!(f, "{}", infer_ty), Error => write!(f, "[type error]"), Param(ref param_ty) => write!(f, "{}", param_ty), + Bound(debruijn, bound_ty) => { + match bound_ty.kind { + ty::BoundTyKind::Anon => { + if debruijn == ty::INNERMOST { + write!(f, "^{}", bound_ty.var.index()) + } else { + write!(f, "^{}_{}", debruijn.index(), bound_ty.var.index()) + } + } + + ty::BoundTyKind::Param(p) => write!(f, "{}", p), + } + } Adt(def, substs) => cx.parameterized(f, substs, def.did, &[]), Dynamic(data, r) => { let r = r.print_to_string(cx); @@ -1148,6 +1144,9 @@ define_print! { data.print(f, cx)?; write!(f, ")") } + Placeholder(placeholder) => { + write!(f, "Placeholder({:?})", placeholder) + } Opaque(def_id, substs) => { if cx.is_verbose { return write!(f, "Opaque({:?}, {:?})", def_id, substs); diff --git a/src/librustc/util/profiling.rs b/src/librustc/util/profiling.rs index 37073b6e82..bea3453b31 100644 --- a/src/librustc/util/profiling.rs +++ b/src/librustc/util/profiling.rs @@ -12,7 +12,7 @@ use session::config::Options; use std::fs; use std::io::{self, StdoutLock, Write}; -use std::time::Instant; +use std::time::{Duration, Instant}; macro_rules! define_categories { ($($name:ident,)*) => { @@ -93,16 +93,27 @@ macro_rules! 
define_categories { $( let (hits, total) = self.query_counts.$name; + //normalize hits to 0% + let hit_percent = + if total > 0 { + ((hits as f32) / (total as f32)) * 100.0 + } else { + 0.0 + }; + json.push_str(&format!( - "{{ \"category\": {}, \"time_ms\": {}, - \"query_count\": {}, \"query_hits\": {} }}", + "{{ \"category\": \"{}\", \"time_ms\": {},\ + \"query_count\": {}, \"query_hits\": {} }},", stringify!($name), self.times.$name / 1_000_000, total, - format!("{:.2}", (((hits as f32) / (total as f32)) * 100.0)) + format!("{:.2}", hit_percent) )); )* + //remove the trailing ',' character + json.pop(); + json.push(']'); json @@ -197,7 +208,20 @@ impl SelfProfiler { } fn stop_timer(&mut self) -> u64 { - let elapsed = self.current_timer.elapsed(); + let elapsed = if cfg!(windows) { + // On Windows, timers don't always appear to be monotonic (see #51648) + // which can lead to panics when calculating elapsed time. + // Work around this by testing to see if the current time is less than + // our recorded time, and if it is, just returning 0. + let now = Instant::now(); + if self.current_timer >= now { + Duration::new(0, 0) + } else { + self.current_timer.elapsed() + } + } else { + self.current_timer.elapsed() + }; self.current_timer = Instant::now(); diff --git a/src/librustc_apfloat/Cargo.toml b/src/librustc_apfloat/Cargo.toml index 735b74f156..a8a5da90c7 100644 --- a/src/librustc_apfloat/Cargo.toml +++ b/src/librustc_apfloat/Cargo.toml @@ -10,3 +10,4 @@ path = "lib.rs" [dependencies] bitflags = "1.0" rustc_cratesio_shim = { path = "../librustc_cratesio_shim" } +smallvec = { version = "0.6.5", features = ["union"] } diff --git a/src/librustc_apfloat/ieee.rs b/src/librustc_apfloat/ieee.rs index 87d59d2e76..adcb9857ee 100644 --- a/src/librustc_apfloat/ieee.rs +++ b/src/librustc_apfloat/ieee.rs @@ -11,6 +11,7 @@ use {Category, ExpInt, IEK_INF, IEK_NAN, IEK_ZERO}; use {Float, FloatConvert, ParseError, Round, Status, StatusAnd}; +use smallvec::{SmallVec, smallvec}; use std::cmp::{self, Ordering}; use std::convert::TryFrom; use std::fmt::{self, Write}; @@ -894,7 +895,7 @@ impl Float for IeeeFloat { } // The intermediate result of the multiplication has "2 * S::PRECISION" - // signicant bit; adjust the addend to be consistent with mul result. + // significant bit; adjust the addend to be consistent with mul result. let mut ext_addend_sig = [addend.sig[0], 0]; // Extend the addend significand to ext_precision - 1. This guarantees @@ -919,7 +920,7 @@ impl Float for IeeeFloat { // Convert the result having "2 * S::PRECISION" significant-bits back to the one // having "S::PRECISION" significant-bits. First, move the radix point from - // poision "2*S::PRECISION - 1" to "S::PRECISION - 1". The exponent need to be + // position "2*S::PRECISION - 1" to "S::PRECISION - 1". The exponent need to be // adjusted by "2*S::PRECISION - 1" - "S::PRECISION - 1" = "S::PRECISION". self.exp -= S::PRECISION as ExpInt + 1; @@ -1962,7 +1963,7 @@ impl IeeeFloat { // to hold the full significand, and an extra limb required by // tcMultiplyPart. let max_limbs = limbs_for_bits(1 + 196 * significand_digits / 59); - let mut dec_sig = Vec::with_capacity(max_limbs); + let mut dec_sig: SmallVec<[Limb; 1]> = SmallVec::with_capacity(max_limbs); // Convert to binary efficiently - we do almost all multiplication // in a Limb. 
When this would overflow do we do a single @@ -2021,11 +2022,11 @@ impl IeeeFloat { const FIRST_EIGHT_POWERS: [Limb; 8] = [1, 5, 25, 125, 625, 3125, 15625, 78125]; - let mut p5_scratch = vec![]; - let mut p5 = vec![FIRST_EIGHT_POWERS[4]]; + let mut p5_scratch = smallvec![]; + let mut p5: SmallVec<[Limb; 1]> = smallvec![FIRST_EIGHT_POWERS[4]]; - let mut r_scratch = vec![]; - let mut r = vec![FIRST_EIGHT_POWERS[power & 7]]; + let mut r_scratch = smallvec![]; + let mut r: SmallVec<[Limb; 1]> = smallvec![FIRST_EIGHT_POWERS[power & 7]]; power >>= 3; while power > 0 { @@ -2064,7 +2065,7 @@ impl IeeeFloat { let calc_precision = (LIMB_BITS << attempt) - 1; attempt += 1; - let calc_normal_from_limbs = |sig: &mut Vec, + let calc_normal_from_limbs = |sig: &mut SmallVec<[Limb; 1]>, limbs: &[Limb]| -> StatusAnd { sig.resize(limbs_for_bits(calc_precision), 0); diff --git a/src/librustc_apfloat/lib.rs b/src/librustc_apfloat/lib.rs index 6ea722ba76..69c9f38540 100644 --- a/src/librustc_apfloat/lib.rs +++ b/src/librustc_apfloat/lib.rs @@ -53,6 +53,7 @@ extern crate rustc_cratesio_shim; #[macro_use] extern crate bitflags; +extern crate smallvec; use std::cmp::Ordering; use std::fmt; diff --git a/src/librustc_asan/Cargo.toml b/src/librustc_asan/Cargo.toml index 34d8b75a5b..734564c2d8 100644 --- a/src/librustc_asan/Cargo.toml +++ b/src/librustc_asan/Cargo.toml @@ -15,6 +15,5 @@ cmake = "0.1.18" [dependencies] alloc = { path = "../liballoc" } -alloc_system = { path = "../liballoc_system" } core = { path = "../libcore" } compiler_builtins = { path = "../rustc/compiler_builtins_shim" } diff --git a/src/librustc_asan/lib.rs b/src/librustc_asan/lib.rs index 7b845e631f..47f917e40c 100644 --- a/src/librustc_asan/lib.rs +++ b/src/librustc_asan/lib.rs @@ -9,7 +9,6 @@ // except according to those terms. #![sanitizer_runtime] -#![feature(alloc_system)] #![feature(nll)] #![feature(sanitizer_runtime)] #![feature(staged_api)] @@ -17,10 +16,3 @@ #![unstable(feature = "sanitizer_runtime_lib", reason = "internal implementation detail of sanitizers", issue = "0")] - -extern crate alloc_system; - -use alloc_system::System; - -#[global_allocator] -static ALLOC: System = System; diff --git a/src/librustc_borrowck/borrowck/README.md b/src/librustc_borrowck/borrowck/README.md index 8bc0b4969b..a05c56e362 100644 --- a/src/librustc_borrowck/borrowck/README.md +++ b/src/librustc_borrowck/borrowck/README.md @@ -3,7 +3,7 @@ > WARNING: This README is more or less obsolete, and will be removed > soon! The new system is described in the [rustc guide]. -[rustc guide]: https://rust-lang-nursery.github.io/rustc-guide/mir/borrowck.html +[rustc guide]: https://rust-lang.github.io/rustc-guide/mir/borrowck.html This pass has the job of enforcing memory safety. This is a subtle topic. This docs aim to explain both the practice and the theory diff --git a/src/librustc_borrowck/borrowck/check_loans.rs b/src/librustc_borrowck/borrowck/check_loans.rs index d9b6452770..a802729e3f 100644 --- a/src/librustc_borrowck/borrowck/check_loans.rs +++ b/src/librustc_borrowck/borrowck/check_loans.rs @@ -377,6 +377,7 @@ impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> { // by-move upvars, which is local data for generators Categorization::Upvar(..) => true, + Categorization::ThreadLocal(region) | Categorization::Rvalue(region) => { // Rvalues promoted to 'static are no longer local if let RegionKind::ReStatic = *region { @@ -425,7 +426,6 @@ impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> { // These cannot exist in borrowck RegionKind::ReVar(..) | - RegionKind::ReCanonical(..) 
| RegionKind::RePlaceholder(..) | RegionKind::ReClosureBound(..) | RegionKind::ReErased => span_bug!(borrow_span, diff --git a/src/librustc_borrowck/borrowck/gather_loans/gather_moves.rs b/src/librustc_borrowck/borrowck/gather_loans/gather_moves.rs index ffc4fbfb4c..7bb5f41175 100644 --- a/src/librustc_borrowck/borrowck/gather_loans/gather_moves.rs +++ b/src/librustc_borrowck/borrowck/gather_loans/gather_moves.rs @@ -177,6 +177,7 @@ fn check_and_get_illegal_move_origin<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, match cmt.cat { Categorization::Deref(_, mc::BorrowedPtr(..)) | Categorization::Deref(_, mc::UnsafePtr(..)) | + Categorization::ThreadLocal(..) | Categorization::StaticItem => { Some(cmt.clone()) } diff --git a/src/librustc_borrowck/borrowck/gather_loans/lifetime.rs b/src/librustc_borrowck/borrowck/gather_loans/lifetime.rs index c9dcc0d9fa..6ef5d65d10 100644 --- a/src/librustc_borrowck/borrowck/gather_loans/lifetime.rs +++ b/src/librustc_borrowck/borrowck/gather_loans/lifetime.rs @@ -70,6 +70,7 @@ impl<'a, 'tcx> GuaranteeLifetimeContext<'a, 'tcx> { match cmt.cat { Categorization::Rvalue(..) | + Categorization::ThreadLocal(..) | Categorization::Local(..) | // L-Local Categorization::Upvar(..) | Categorization::Deref(_, mc::BorrowedPtr(..)) | // L-Deref-Borrowed @@ -105,6 +106,7 @@ impl<'a, 'tcx> GuaranteeLifetimeContext<'a, 'tcx> { //! rooting etc, and presuming `cmt` is not mutated. match cmt.cat { + Categorization::ThreadLocal(temp_scope) | Categorization::Rvalue(temp_scope) => { temp_scope } diff --git a/src/librustc_borrowck/borrowck/gather_loans/mod.rs b/src/librustc_borrowck/borrowck/gather_loans/mod.rs index 1f83c30a38..21fb0cdf90 100644 --- a/src/librustc_borrowck/borrowck/gather_loans/mod.rs +++ b/src/librustc_borrowck/borrowck/gather_loans/mod.rs @@ -363,7 +363,6 @@ impl<'a, 'tcx> GatherLoanCtxt<'a, 'tcx> { ty::ReStatic => self.item_ub, - ty::ReCanonical(_) | ty::ReEmpty | ty::ReClosureBound(..) | ty::ReLateBound(..) | @@ -454,8 +453,8 @@ impl<'a, 'tcx> GatherLoanCtxt<'a, 'tcx> { } None } - LpUpvar(ty::UpvarId{ var_id, closure_expr_id: _ }) => { - self.bccx.used_mut_nodes.borrow_mut().insert(var_id); + LpUpvar(ty::UpvarId{ var_path: ty::UpvarPath { hir_id }, closure_expr_id: _ }) => { + self.bccx.used_mut_nodes.borrow_mut().insert(hir_id); None } LpExtend(ref base, mc::McInherited, LpDeref(pointer_kind)) | diff --git a/src/librustc_borrowck/borrowck/gather_loans/move_error.rs b/src/librustc_borrowck/borrowck/gather_loans/move_error.rs index b29ab55f9b..cfd530b7e3 100644 --- a/src/librustc_borrowck/borrowck/gather_loans/move_error.rs +++ b/src/librustc_borrowck/borrowck/gather_loans/move_error.rs @@ -97,7 +97,7 @@ fn report_move_errors<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, errors: &[MoveErr } } if let NoteClosureEnv(upvar_id) = error.move_from.note { - let var_node_id = bccx.tcx.hir.hir_to_node_id(upvar_id.var_id); + let var_node_id = bccx.tcx.hir.hir_to_node_id(upvar_id.var_path.hir_id); err.span_label(bccx.tcx.hir.span(var_node_id), "captured outer variable"); } @@ -145,6 +145,8 @@ fn report_cannot_move_out_of<'a, 'tcx>(bccx: &'a BorrowckCtxt<'a, 'tcx>, match move_from.cat { Categorization::Deref(_, mc::BorrowedPtr(..)) | Categorization::Deref(_, mc::UnsafePtr(..)) | + Categorization::Deref(_, mc::Unique) | + Categorization::ThreadLocal(..) 
| Categorization::StaticItem => { bccx.cannot_move_out_of( move_from.span, &move_from.descriptive_string(bccx.tcx), Origin::Ast) @@ -166,7 +168,10 @@ fn report_cannot_move_out_of<'a, 'tcx>(bccx: &'a BorrowckCtxt<'a, 'tcx>, } } } - _ => { + + Categorization::Rvalue(..) | + Categorization::Local(..) | + Categorization::Upvar(..) => { span_bug!(move_from.span, "this path should not cause illegal move"); } } diff --git a/src/librustc_borrowck/borrowck/gather_loans/restrictions.rs b/src/librustc_borrowck/borrowck/gather_loans/restrictions.rs index d9784cc217..52c7ebb4be 100644 --- a/src/librustc_borrowck/borrowck/gather_loans/restrictions.rs +++ b/src/librustc_borrowck/borrowck/gather_loans/restrictions.rs @@ -70,6 +70,12 @@ impl<'a, 'tcx> RestrictionsContext<'a, 'tcx> { RestrictionResult::Safe } + Categorization::ThreadLocal(..) => { + // Thread-locals are statics that have a scope, with + // no underlying structure to provide restrictions. + RestrictionResult::Safe + } + Categorization::Local(local_id) => { // R-Variable, locally declared let lp = new_lp(LpVar(local_id)); diff --git a/src/librustc_borrowck/borrowck/mod.rs b/src/librustc_borrowck/borrowck/mod.rs index bf8d023130..d189460d08 100644 --- a/src/librustc_borrowck/borrowck/mod.rs +++ b/src/librustc_borrowck/borrowck/mod.rs @@ -520,6 +520,7 @@ pub fn opt_loan_path_is_field<'tcx>(cmt: &mc::cmt_<'tcx>) -> (Option { (None, false) } @@ -845,7 +846,7 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { MutabilityViolation => { let mut db = self.cannot_assign(error_span, &descr, Origin::Ast); if let mc::NoteClosureEnv(upvar_id) = err.cmt.note { - let node_id = self.tcx.hir.hir_to_node_id(upvar_id.var_id); + let node_id = self.tcx.hir.hir_to_node_id(upvar_id.var_path.hir_id); let sp = self.tcx.hir.span(node_id); let fn_closure_msg = "`Fn` closures cannot capture their enclosing \ environment for modifications"; @@ -1414,7 +1415,7 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { loan_path: &LoanPath<'tcx>, out: &mut String) { match loan_path.kind { - LpUpvar(ty::UpvarId { var_id: id, closure_expr_id: _ }) => { + LpUpvar(ty::UpvarId { var_path: ty::UpvarPath { hir_id: id}, closure_expr_id: _ }) => { out.push_str(&self.tcx.hir.name(self.tcx.hir.hir_to_node_id(id)).as_str()); } LpVar(id) => { @@ -1532,7 +1533,7 @@ impl<'tcx> fmt::Debug for LoanPath<'tcx> { write!(f, "$({})", ty::tls::with(|tcx| tcx.hir.node_to_string(id))) } - LpUpvar(ty::UpvarId{ var_id, closure_expr_id }) => { + LpUpvar(ty::UpvarId{ var_path: ty::UpvarPath {hir_id: var_id}, closure_expr_id }) => { let s = ty::tls::with(|tcx| { let var_node_id = tcx.hir.hir_to_node_id(var_id); tcx.hir.node_to_string(var_node_id) @@ -1567,9 +1568,9 @@ impl<'tcx> fmt::Display for LoanPath<'tcx> { write!(f, "$({})", ty::tls::with(|tcx| tcx.hir.node_to_user_string(id))) } - LpUpvar(ty::UpvarId{ var_id, closure_expr_id: _ }) => { + LpUpvar(ty::UpvarId{ var_path: ty::UpvarPath { hir_id }, closure_expr_id: _ }) => { let s = ty::tls::with(|tcx| { - let var_node_id = tcx.hir.hir_to_node_id(var_id); + let var_node_id = tcx.hir.hir_to_node_id(hir_id); tcx.hir.node_to_string(var_node_id) }); write!(f, "$({} captured by closure)", s) diff --git a/src/librustc_codegen_llvm/README.md b/src/librustc_codegen_llvm/README.md index 8d1c9a52b2..dda2e5ac18 100644 --- a/src/librustc_codegen_llvm/README.md +++ b/src/librustc_codegen_llvm/README.md @@ -4,4 +4,4 @@ that runs towards the end of the compilation process. For more information about how codegen works, see the [rustc guide]. 
-[rustc guide]: https://rust-lang-nursery.github.io/rustc-guide/codegen.html +[rustc guide]: https://rust-lang.github.io/rustc-guide/codegen.html diff --git a/src/librustc_codegen_llvm/abi.rs b/src/librustc_codegen_llvm/abi.rs index 7b93d3e795..5b6d157043 100644 --- a/src/librustc_codegen_llvm/abi.rs +++ b/src/librustc_codegen_llvm/abi.rs @@ -9,18 +9,20 @@ // except according to those terms. use llvm::{self, AttributePlace}; -use base; -use builder::{Builder, MemFlags}; -use common::{ty_fn_sig, C_usize}; +use rustc_codegen_ssa::MemFlags; +use builder::Builder; use context::CodegenCx; -use mir::place::PlaceRef; -use mir::operand::OperandValue; +use rustc_codegen_ssa::mir::place::PlaceRef; +use rustc_codegen_ssa::mir::operand::OperandValue; use type_::Type; use type_of::{LayoutLlvmExt, PointerKind}; use value::Value; +use rustc_target::abi::call::ArgType; -use rustc_target::abi::{LayoutOf, Size, TyLayout}; -use rustc::ty::{self, Ty}; +use rustc_codegen_ssa::traits::*; + +use rustc_target::abi::{HasDataLayout, LayoutOf, Size, TyLayout, Abi as LayoutAbi}; +use rustc::ty::{self, Ty, Instance}; use rustc::ty::layout; use libc::c_uint; @@ -71,7 +73,7 @@ impl ArgAttributesExt for ArgAttributes { if let Some(align) = self.pointee_align { llvm::LLVMRustAddAlignmentAttr(llfn, idx.as_uint(), - align.abi() as u32); + align.bytes() as u32); } regular.for_each_kind(|attr| attr.apply_llfn(idx, llfn)); } @@ -96,7 +98,7 @@ impl ArgAttributesExt for ArgAttributes { if let Some(align) = self.pointee_align { llvm::LLVMRustAddAlignmentCallSiteAttr(callsite, idx.as_uint(), - align.abi() as u32); + align.bytes() as u32); } regular.for_each_kind(|attr| attr.apply_callsite(idx, callsite)); } @@ -110,16 +112,16 @@ pub trait LlvmType { impl LlvmType for Reg { fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type { match self.kind { - RegKind::Integer => Type::ix(cx, self.size.bits()), + RegKind::Integer => cx.type_ix(self.size.bits()), RegKind::Float => { match self.size.bits() { - 32 => Type::f32(cx), - 64 => Type::f64(cx), + 32 => cx.type_f32(), + 64 => cx.type_f64(), _ => bug!("unsupported float: {:?}", self) } } RegKind::Vector => { - Type::vector(Type::i8(cx), self.size.bytes()) + cx.type_vector(cx.type_i8(), self.size.bytes()) } } } @@ -143,7 +145,7 @@ impl LlvmType for CastTarget { // Simplify to array when all chunks are the same size and type if rem_bytes == 0 { - return Type::array(rest_ll_unit, rest_count); + return cx.type_array(rest_ll_unit, rest_count); } } @@ -158,17 +160,27 @@ impl LlvmType for CastTarget { if rem_bytes != 0 { // Only integers can be really split further. assert_eq!(self.rest.unit.kind, RegKind::Integer); - args.push(Type::ix(cx, rem_bytes * 8)); + args.push(cx.type_ix(rem_bytes * 8)); } - Type::struct_(cx, &args, false) + cx.type_struct(&args, false) } } pub trait ArgTypeExt<'ll, 'tcx> { fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type; - fn store(&self, bx: &Builder<'_, 'll, 'tcx>, val: &'ll Value, dst: PlaceRef<'ll, 'tcx>); - fn store_fn_arg(&self, bx: &Builder<'_, 'll, 'tcx>, idx: &mut usize, dst: PlaceRef<'ll, 'tcx>); + fn store( + &self, + bx: &mut Builder<'_, 'll, 'tcx>, + val: &'ll Value, + dst: PlaceRef<'tcx, &'ll Value>, + ); + fn store_fn_arg( + &self, + bx: &mut Builder<'_, 'll, 'tcx>, + idx: &mut usize, + dst: PlaceRef<'tcx, &'ll Value>, + ); } impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> { @@ -182,13 +194,17 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> { /// place for the original Rust type of this argument/return. 
/// Can be used for both storing formal arguments into Rust variables /// or results of call/invoke instructions into their destinations. - fn store(&self, bx: &Builder<'_, 'll, 'tcx>, val: &'ll Value, dst: PlaceRef<'ll, 'tcx>) { + fn store( + &self, + bx: &mut Builder<'_, 'll, 'tcx>, + val: &'ll Value, + dst: PlaceRef<'tcx, &'ll Value>, + ) { if self.is_ignore() { return; } - let cx = bx.cx; if self.is_sized_indirect() { - OperandValue::Ref(val, None, self.layout.align).store(bx, dst) + OperandValue::Ref(val, None, self.layout.align.abi).store(bx, dst) } else if self.is_unsized_indirect() { bug!("unsized ArgType must be handled through store_fn_arg"); } else if let PassMode::Cast(cast) = self.mode { @@ -196,8 +212,9 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> { // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}. let can_store_through_cast_ptr = false; if can_store_through_cast_ptr { - let cast_dst = bx.pointercast(dst.llval, cast.llvm_type(cx).ptr_to()); - bx.store(val, cast_dst, self.layout.align); + let cast_ptr_llty = bx.type_ptr_to(cast.llvm_type(bx)); + let cast_dst = bx.pointercast(dst.llval, cast_ptr_llty); + bx.store(val, cast_dst, self.layout.align.abi); } else { // The actual return type is a struct, but the ABI // adaptation code has cast it into some scalar type. The @@ -214,21 +231,23 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> { // bitcasting to the struct type yields invalid cast errors. // We instead thus allocate some scratch space... - let scratch_size = cast.size(cx); - let scratch_align = cast.align(cx); - let llscratch = bx.alloca(cast.llvm_type(cx), "abi_cast", scratch_align); + let scratch_size = cast.size(bx); + let scratch_align = cast.align(bx); + let llscratch = bx.alloca(cast.llvm_type(bx), "abi_cast", scratch_align); bx.lifetime_start(llscratch, scratch_size); // ...where we first store the value... bx.store(val, llscratch, scratch_align); // ...and then memcpy it to the intended destination. 
- base::call_memcpy(bx, - bx.pointercast(dst.llval, Type::i8p(cx)), - bx.pointercast(llscratch, Type::i8p(cx)), - C_usize(cx, self.layout.size.bytes()), - self.layout.align.min(scratch_align), - MemFlags::empty()); + bx.memcpy( + dst.llval, + self.layout.align.abi, + llscratch, + scratch_align, + bx.const_usize(self.layout.size.bytes()), + MemFlags::empty() + ); bx.lifetime_end(llscratch, scratch_size); } @@ -237,7 +256,12 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> { } } - fn store_fn_arg(&self, bx: &Builder<'a, 'll, 'tcx>, idx: &mut usize, dst: PlaceRef<'ll, 'tcx>) { + fn store_fn_arg( + &self, + bx: &mut Builder<'a, 'll, 'tcx>, + idx: &mut usize, + dst: PlaceRef<'tcx, &'ll Value>, + ) { let mut next = || { let val = llvm::get_param(bx.llfn(), *idx as c_uint); *idx += 1; @@ -249,7 +273,7 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> { OperandValue::Pair(next(), next()).store(bx, dst); } PassMode::Indirect(_, Some(_)) => { - OperandValue::Ref(next(), Some(next()), self.layout.align).store(bx, dst); + OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst); } PassMode::Direct(_) | PassMode::Indirect(_, None) | PassMode::Cast(_) => { self.store(bx, next(), dst); @@ -258,6 +282,27 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> { } } +impl ArgTypeMethods<'tcx> for Builder<'a, 'll, 'tcx> { + fn store_fn_arg( + &mut self, + ty: &ArgType<'tcx, Ty<'tcx>>, + idx: &mut usize, dst: PlaceRef<'tcx, Self::Value> + ) { + ty.store_fn_arg(self, idx, dst) + } + fn store_arg_ty( + &mut self, + ty: &ArgType<'tcx, Ty<'tcx>>, + val: &'ll Value, + dst: PlaceRef<'tcx, &'ll Value> + ) { + ty.store(self, val, dst) + } + fn memory_ty(&self, ty: &ArgType<'tcx, Ty<'tcx>>) -> &'ll Type { + ty.memory_ty(self) + } +} + pub trait FnTypeExt<'tcx> { fn of_instance(cx: &CodegenCx<'ll, 'tcx>, instance: &ty::Instance<'tcx>) -> Self; fn new(cx: &CodegenCx<'ll, 'tcx>, @@ -276,15 +321,15 @@ pub trait FnTypeExt<'tcx> { cx: &CodegenCx<'ll, 'tcx>, abi: Abi); fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type; + fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type; fn llvm_cconv(&self) -> llvm::CallConv; fn apply_attrs_llfn(&self, llfn: &'ll Value); - fn apply_attrs_callsite(&self, bx: &Builder<'a, 'll, 'tcx>, callsite: &'ll Value); + fn apply_attrs_callsite(&self, bx: &mut Builder<'a, 'll, 'tcx>, callsite: &'ll Value); } impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> { fn of_instance(cx: &CodegenCx<'ll, 'tcx>, instance: &ty::Instance<'tcx>) -> Self { - let fn_ty = instance.ty(cx.tcx); - let sig = ty_fn_sig(cx, fn_ty); + let sig = instance.fn_sig(cx.tcx); let sig = cx.tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig); FnType::new(cx, sig, &[]) } @@ -303,21 +348,49 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> { FnType::new_internal(cx, sig, extra_args, |ty, arg_idx| { let mut layout = cx.layout_of(ty); // Don't pass the vtable, it's not an argument of the virtual fn. - // Instead, pass just the (thin pointer) first field of `*dyn Trait`. + // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait` + // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen if arg_idx == Some(0) { - if layout.is_unsized() { - unimplemented!("by-value trait object is not \ - yet implemented in #![feature(unsized_locals)]"); - } - // FIXME(eddyb) `layout.field(cx, 0)` is not enough because e.g. 
- // `Box` has a few newtype wrappers around the raw - // pointer, so we'd have to "dig down" to find `*dyn Trait`. - let pointee = layout.ty.builtin_deref(true) - .unwrap_or_else(|| { - bug!("FnType::new_vtable: non-pointer self {:?}", layout) - }).ty; - let fat_ptr_ty = cx.tcx.mk_mut_ptr(pointee); - layout = cx.layout_of(fat_ptr_ty).field(cx, 0); + let fat_pointer_ty = if layout.is_unsized() { + // unsized `self` is passed as a pointer to `self` + // FIXME (mikeyhew) change this to use &own if it is ever added to the language + cx.tcx.mk_mut_ptr(layout.ty) + } else { + match layout.abi { + LayoutAbi::ScalarPair(..) => (), + _ => bug!("receiver type has unsupported layout: {:?}", layout) + } + + // In the case of Rc, we need to explicitly pass a *mut RcBox + // with a Scalar (not ScalarPair) ABI. This is a hack that is understood + // elsewhere in the compiler as a method on a `dyn Trait`. + // To get the type `*mut RcBox`, we just keep unwrapping newtypes until we + // get a built-in pointer type + let mut fat_pointer_layout = layout; + 'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr() + && !fat_pointer_layout.ty.is_region_ptr() + { + 'iter_fields: for i in 0..fat_pointer_layout.fields.count() { + let field_layout = fat_pointer_layout.field(cx, i); + + if !field_layout.is_zst() { + fat_pointer_layout = field_layout; + continue 'descend_newtypes + } + } + + bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout); + } + + fat_pointer_layout.ty + }; + + // we now have a type like `*mut RcBox` + // change its layout to that of `*mut ()`, a thin pointer, but keep the same type + // this is understood as a special case elsewhere in the compiler + let unit_pointer_ty = cx.tcx.mk_mut_ptr(cx.tcx.mk_unit()); + layout = cx.layout_of(unit_pointer_ty); + layout.ty = fat_pointer_ty; } ArgType::new(layout) }) @@ -472,7 +545,7 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> { adjust_for_rust_scalar(&mut b_attrs, b, arg.layout, - a.value.size(cx).abi_align(b.value.align(cx)), + a.value.size(cx).align_to(b.value.align(cx).abi), false); arg.mode = PassMode::Pair(a_attrs, b_attrs); return arg; @@ -585,14 +658,14 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> { ); let llreturn_ty = match self.ret.mode { - PassMode::Ignore => Type::void(cx), + PassMode::Ignore => cx.type_void(), PassMode::Direct(_) | PassMode::Pair(..) => { self.ret.layout.immediate_llvm_type(cx) } PassMode::Cast(cast) => cast.llvm_type(cx), PassMode::Indirect(..) 
=> { - llargument_tys.push(self.ret.memory_ty(cx).ptr_to()); - Type::void(cx) + llargument_tys.push(cx.type_ptr_to(self.ret.memory_ty(cx))); + cx.type_void() } }; @@ -618,15 +691,22 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> { continue; } PassMode::Cast(cast) => cast.llvm_type(cx), - PassMode::Indirect(_, None) => arg.memory_ty(cx).ptr_to(), + PassMode::Indirect(_, None) => cx.type_ptr_to(arg.memory_ty(cx)), }; llargument_tys.push(llarg_ty); } if self.variadic { - Type::variadic_func(&llargument_tys, llreturn_ty) + cx.type_variadic_func(&llargument_tys, llreturn_ty) } else { - Type::func(&llargument_tys, llreturn_ty) + cx.type_func(&llargument_tys, llreturn_ty) + } + } + + fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type { + unsafe { + llvm::LLVMPointerType(self.llvm_type(cx), + cx.data_layout().instruction_address_space as c_uint) } } @@ -681,7 +761,7 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> { } } - fn apply_attrs_callsite(&self, bx: &Builder<'a, 'll, 'tcx>, callsite: &'ll Value) { + fn apply_attrs_callsite(&self, bx: &mut Builder<'a, 'll, 'tcx>, callsite: &'ll Value) { let mut i = 0; let mut apply = |attrs: &ArgAttributes| { attrs.apply_callsite(llvm::AttributePlace::Argument(i), callsite); @@ -700,7 +780,7 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> { // by the LLVM verifier. if let layout::Int(..) = scalar.value { if !scalar.is_bool() { - let range = scalar.valid_range_exclusive(bx.cx); + let range = scalar.valid_range_exclusive(bx); if range.start != range.end { bx.range_metadata(callsite, range); } @@ -733,3 +813,29 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> { } } } + +impl AbiMethods<'tcx> for CodegenCx<'ll, 'tcx> { + fn new_fn_type(&self, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> FnType<'tcx, Ty<'tcx>> { + FnType::new(&self, sig, extra_args) + } + fn new_vtable( + &self, + sig: ty::FnSig<'tcx>, + extra_args: &[Ty<'tcx>] + ) -> FnType<'tcx, Ty<'tcx>> { + FnType::new_vtable(&self, sig, extra_args) + } + fn fn_type_of_instance(&self, instance: &Instance<'tcx>) -> FnType<'tcx, Ty<'tcx>> { + FnType::of_instance(&self, instance) + } +} + +impl AbiBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> { + fn apply_attrs_callsite( + &mut self, + ty: &FnType<'tcx, Ty<'tcx>>, + callsite: Self::Value + ) { + ty.apply_attrs_callsite(self, callsite) + } +} diff --git a/src/librustc_codegen_llvm/asm.rs b/src/librustc_codegen_llvm/asm.rs index f1bb41bceb..294596cea5 100644 --- a/src/librustc_codegen_llvm/asm.rs +++ b/src/librustc_codegen_llvm/asm.rs @@ -9,126 +9,123 @@ // except according to those terms. 
use llvm; -use common::*; -use type_::Type; +use context::CodegenCx; use type_of::LayoutLlvmExt; use builder::Builder; use value::Value; use rustc::hir; +use rustc_codegen_ssa::traits::*; -use mir::place::PlaceRef; -use mir::operand::OperandValue; +use rustc_codegen_ssa::mir::place::PlaceRef; +use rustc_codegen_ssa::mir::operand::OperandValue; use std::ffi::CString; -use syntax::ast::AsmDialect; use libc::{c_uint, c_char}; -// Take an inline assembly expression and splat it out via LLVM -pub fn codegen_inline_asm( - bx: &Builder<'a, 'll, 'tcx>, - ia: &hir::InlineAsm, - outputs: Vec>, - mut inputs: Vec<&'ll Value> -) -> bool { - let mut ext_constraints = vec![]; - let mut output_types = vec![]; - // Prepare the output operands - let mut indirect_outputs = vec![]; - for (i, (out, place)) in ia.outputs.iter().zip(&outputs).enumerate() { - if out.is_rw { - inputs.push(place.load(bx).immediate()); - ext_constraints.push(i.to_string()); +impl AsmBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> { + fn codegen_inline_asm( + &mut self, + ia: &hir::InlineAsm, + outputs: Vec>, + mut inputs: Vec<&'ll Value> + ) -> bool { + let mut ext_constraints = vec![]; + let mut output_types = vec![]; + + // Prepare the output operands + let mut indirect_outputs = vec![]; + for (i, (out, &place)) in ia.outputs.iter().zip(&outputs).enumerate() { + if out.is_rw { + inputs.push(self.load_operand(place).immediate()); + ext_constraints.push(i.to_string()); + } + if out.is_indirect { + indirect_outputs.push(self.load_operand(place).immediate()); + } else { + output_types.push(place.layout.llvm_type(self.cx())); + } } - if out.is_indirect { - indirect_outputs.push(place.load(bx).immediate()); - } else { - output_types.push(place.layout.llvm_type(bx.cx)); + if !indirect_outputs.is_empty() { + indirect_outputs.extend_from_slice(&inputs); + inputs = indirect_outputs; } + + let clobbers = ia.clobbers.iter() + .map(|s| format!("~{{{}}}", &s)); + + // Default per-arch clobbers + // Basically what clang does + let arch_clobbers = match &self.sess().target.target.arch[..] { + "x86" | "x86_64" => vec!["~{dirflag}", "~{fpsr}", "~{flags}"], + "mips" | "mips64" => vec!["~{$1}"], + _ => Vec::new() + }; + + let all_constraints = + ia.outputs.iter().map(|out| out.constraint.to_string()) + .chain(ia.inputs.iter().map(|s| s.to_string())) + .chain(ext_constraints) + .chain(clobbers) + .chain(arch_clobbers.iter().map(|s| s.to_string())) + .collect::>().join(","); + + debug!("Asm Constraints: {}", &all_constraints); + + // Depending on how many outputs we have, the return type is different + let num_outputs = output_types.len(); + let output_type = match num_outputs { + 0 => self.type_void(), + 1 => output_types[0], + _ => self.type_struct(&output_types, false) + }; + + let asm = CString::new(ia.asm.as_str().as_bytes()).unwrap(); + let constraint_cstr = CString::new(all_constraints).unwrap(); + let r = self.inline_asm_call( + &asm, + &constraint_cstr, + &inputs, + output_type, + ia.volatile, + ia.alignstack, + ia.dialect + ); + if r.is_none() { + return false; + } + let r = r.unwrap(); + + // Again, based on how many outputs we have + let outputs = ia.outputs.iter().zip(&outputs).filter(|&(ref o, _)| !o.is_indirect); + for (i, (_, &place)) in outputs.enumerate() { + let v = if num_outputs == 1 { r } else { self.extract_value(r, i as u64) }; + OperandValue::Immediate(v).store(self, place); + } + + // Store mark in a metadata node so we can map LLVM errors + // back to source locations. See #17552. 
+ unsafe { + let key = "srcloc"; + let kind = llvm::LLVMGetMDKindIDInContext(self.llcx, + key.as_ptr() as *const c_char, key.len() as c_uint); + + let val: &'ll Value = self.const_i32(ia.ctxt.outer().as_u32() as i32); + + llvm::LLVMSetMetadata(r, kind, + llvm::LLVMMDNodeInContext(self.llcx, &val, 1)); + } + + true } - if !indirect_outputs.is_empty() { - indirect_outputs.extend_from_slice(&inputs); - inputs = indirect_outputs; - } - - let clobbers = ia.clobbers.iter() - .map(|s| format!("~{{{}}}", &s)); - - // Default per-arch clobbers - // Basically what clang does - let arch_clobbers = match &bx.sess().target.target.arch[..] { - "x86" | "x86_64" => vec!["~{dirflag}", "~{fpsr}", "~{flags}"], - "mips" | "mips64" => vec!["~{$1}"], - _ => Vec::new() - }; - - let all_constraints = - ia.outputs.iter().map(|out| out.constraint.to_string()) - .chain(ia.inputs.iter().map(|s| s.to_string())) - .chain(ext_constraints) - .chain(clobbers) - .chain(arch_clobbers.iter().map(|s| s.to_string())) - .collect::>().join(","); - - debug!("Asm Constraints: {}", &all_constraints); - - // Depending on how many outputs we have, the return type is different - let num_outputs = output_types.len(); - let output_type = match num_outputs { - 0 => Type::void(bx.cx), - 1 => output_types[0], - _ => Type::struct_(bx.cx, &output_types, false) - }; - - let dialect = match ia.dialect { - AsmDialect::Att => llvm::AsmDialect::Att, - AsmDialect::Intel => llvm::AsmDialect::Intel, - }; - - let asm = CString::new(ia.asm.as_str().as_bytes()).unwrap(); - let constraint_cstr = CString::new(all_constraints).unwrap(); - let r = bx.inline_asm_call( - asm.as_ptr(), - constraint_cstr.as_ptr(), - &inputs, - output_type, - ia.volatile, - ia.alignstack, - dialect - ); - if r.is_none() { - return false; - } - let r = r.unwrap(); - - // Again, based on how many outputs we have - let outputs = ia.outputs.iter().zip(&outputs).filter(|&(ref o, _)| !o.is_indirect); - for (i, (_, &place)) in outputs.enumerate() { - let v = if num_outputs == 1 { r } else { bx.extract_value(r, i as u64) }; - OperandValue::Immediate(v).store(bx, place); - } - - // Store mark in a metadata node so we can map LLVM errors - // back to source locations. See #17552. 
- unsafe { - let key = "srcloc"; - let kind = llvm::LLVMGetMDKindIDInContext(bx.cx.llcx, - key.as_ptr() as *const c_char, key.len() as c_uint); - - let val: &'ll Value = C_i32(bx.cx, ia.ctxt.outer().as_u32() as i32); - - llvm::LLVMSetMetadata(r, kind, - llvm::LLVMMDNodeInContext(bx.cx.llcx, &val, 1)); - } - - return true; } -pub fn codegen_global_asm<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, - ga: &hir::GlobalAsm) { - let asm = CString::new(ga.asm.as_str().as_bytes()).unwrap(); - unsafe { - llvm::LLVMRustAppendModuleInlineAsm(cx.llmod, asm.as_ptr()); +impl AsmMethods<'tcx> for CodegenCx<'ll, 'tcx> { + fn codegen_global_asm(&self, ga: &hir::GlobalAsm) { + let asm = CString::new(ga.asm.as_str().as_bytes()).unwrap(); + unsafe { + llvm::LLVMRustAppendModuleInlineAsm(self.llmod, asm.as_ptr()); + } } } diff --git a/src/librustc_codegen_llvm/attributes.rs b/src/librustc_codegen_llvm/attributes.rs index f45b3728bc..38ab1302cf 100644 --- a/src/librustc_codegen_llvm/attributes.rs +++ b/src/librustc_codegen_llvm/attributes.rs @@ -21,6 +21,7 @@ use rustc::ty::query::Providers; use rustc_data_structures::sync::Lrc; use rustc_data_structures::fx::FxHashMap; use rustc_target::spec::PanicStrategy; +use rustc_codegen_ssa::traits::*; use attributes; use llvm::{self, Attribute}; diff --git a/src/librustc_codegen_llvm/back/archive.rs b/src/librustc_codegen_llvm/back/archive.rs index af9efc6d7c..76c5071163 100644 --- a/src/librustc_codegen_llvm/back/archive.rs +++ b/src/librustc_codegen_llvm/back/archive.rs @@ -18,6 +18,7 @@ use std::ptr; use std::str; use back::bytecode::RLIB_BYTECODE_EXTENSION; +use rustc_codegen_ssa::back::archive::find_library; use libc; use llvm::archive_ro::{ArchiveRO, Child}; use llvm::{self, ArchiveKind}; @@ -52,29 +53,6 @@ enum Addition { }, } -pub fn find_library(name: &str, search_paths: &[PathBuf], sess: &Session) - -> PathBuf { - // On Windows, static libraries sometimes show up as libfoo.a and other - // times show up as foo.lib - let oslibname = format!("{}{}{}", - sess.target.target.options.staticlib_prefix, - name, - sess.target.target.options.staticlib_suffix); - let unixlibname = format!("lib{}.a", name); - - for path in search_paths { - debug!("looking for {} inside {:?}", name, path); - let test = path.join(&oslibname); - if test.exists() { return test } - if oslibname != unixlibname { - let test = path.join(&unixlibname); - if test.exists() { return test } - } - } - sess.fatal(&format!("could not find native static library `{}`, \ - perhaps an -L flag is missing?", name)); -} - fn is_relevant_child(c: &Child) -> bool { match c.name() { Some(name) => !name.contains("SYMDEF"), @@ -105,15 +83,16 @@ impl<'a> ArchiveBuilder<'a> { if self.src_archive().is_none() { return Vec::new() } + let archive = self.src_archive.as_ref().unwrap().as_ref().unwrap(); - let ret = archive.iter() - .filter_map(|child| child.ok()) - .filter(is_relevant_child) - .filter_map(|child| child.name()) - .filter(|name| !self.removals.iter().any(|x| x == name)) - .map(|name| name.to_string()) - .collect(); - return ret; + + archive.iter() + .filter_map(|child| child.ok()) + .filter(is_relevant_child) + .filter_map(|child| child.name()) + .filter(|name| !self.removals.iter().any(|x| x == name)) + .map(|name| name.to_owned()) + .collect() } fn src_archive(&mut self) -> Option<&ArchiveRO> { @@ -193,7 +172,7 @@ impl<'a> ArchiveBuilder<'a> { let name = file.file_name().unwrap().to_str().unwrap(); self.additions.push(Addition::File { path: file.to_path_buf(), - name_in_archive: name.to_string(), + name_in_archive: 
name.to_owned(), }); } @@ -206,13 +185,8 @@ impl<'a> ArchiveBuilder<'a> { /// Combine the provided files, rlibs, and native libraries into a single /// `Archive`. pub fn build(&mut self) { - let kind = match self.llvm_archive_kind() { - Ok(kind) => kind, - Err(kind) => { - self.config.sess.fatal(&format!("Don't know how to build archive of type: {}", - kind)); - } - }; + let kind = self.llvm_archive_kind().unwrap_or_else(|kind| + self.config.sess.fatal(&format!("Don't know how to build archive of type: {}", kind))); if let Err(e) = self.build_with_llvm(kind) { self.config.sess.fatal(&format!("failed to build archive: {}", e)); @@ -303,10 +277,9 @@ impl<'a> ArchiveBuilder<'a> { let ret = if r.into_result().is_err() { let err = llvm::LLVMRustGetLastError(); let msg = if err.is_null() { - "failed to write archive".to_string() + "failed to write archive".into() } else { String::from_utf8_lossy(CStr::from_ptr(err).to_bytes()) - .into_owned() }; Err(io::Error::new(io::ErrorKind::Other, msg)) } else { @@ -315,7 +288,7 @@ impl<'a> ArchiveBuilder<'a> { for member in members { llvm::LLVMRustArchiveMemberFree(member); } - return ret + ret } } } diff --git a/src/librustc_codegen_llvm/back/bytecode.rs b/src/librustc_codegen_llvm/back/bytecode.rs index 9a3dd9d2f8..0b264de18c 100644 --- a/src/librustc_codegen_llvm/back/bytecode.rs +++ b/src/librustc_codegen_llvm/back/bytecode.rs @@ -42,7 +42,7 @@ use flate2::write::DeflateEncoder; // This is the "magic number" expected at the beginning of a LLVM bytecode // object in an rlib. -pub const RLIB_BYTECODE_OBJECT_MAGIC: &'static [u8] = b"RUST_OBJECT"; +pub const RLIB_BYTECODE_OBJECT_MAGIC: &[u8] = b"RUST_OBJECT"; // The version number this compiler will write to bytecode objects in rlibs pub const RLIB_BYTECODE_OBJECT_VERSION: u8 = 2; @@ -106,39 +106,39 @@ pub struct DecodedBytecode<'a> { } impl<'a> DecodedBytecode<'a> { - pub fn new(data: &'a [u8]) -> Result, String> { + pub fn new(data: &'a [u8]) -> Result, &'static str> { if !data.starts_with(RLIB_BYTECODE_OBJECT_MAGIC) { - return Err("magic bytecode prefix not found".to_string()) + return Err("magic bytecode prefix not found") } let data = &data[RLIB_BYTECODE_OBJECT_MAGIC.len()..]; if !data.starts_with(&[RLIB_BYTECODE_OBJECT_VERSION, 0, 0, 0]) { - return Err("wrong version prefix found in bytecode".to_string()) + return Err("wrong version prefix found in bytecode") } let data = &data[4..]; if data.len() < 4 { - return Err("bytecode corrupted".to_string()) + return Err("bytecode corrupted") } let identifier_len = unsafe { u32::from_le(ptr::read_unaligned(data.as_ptr() as *const u32)) as usize }; let data = &data[4..]; if data.len() < identifier_len { - return Err("bytecode corrupted".to_string()) + return Err("bytecode corrupted") } let identifier = match str::from_utf8(&data[..identifier_len]) { Ok(s) => s, - Err(_) => return Err("bytecode corrupted".to_string()) + Err(_) => return Err("bytecode corrupted") }; let data = &data[identifier_len..]; if data.len() < 8 { - return Err("bytecode corrupted".to_string()) + return Err("bytecode corrupted") } let bytecode_len = unsafe { u64::from_le(ptr::read_unaligned(data.as_ptr() as *const u64)) as usize }; let data = &data[8..]; if data.len() < bytecode_len { - return Err("bytecode corrupted".to_string()) + return Err("bytecode corrupted") } let encoded_bytecode = &data[..bytecode_len]; diff --git a/src/librustc_codegen_llvm/back/link.rs b/src/librustc_codegen_llvm/back/link.rs index 86c6a5e65b..68da14570e 100644 --- a/src/librustc_codegen_llvm/back/link.rs +++ 
b/src/librustc_codegen_llvm/back/link.rs @@ -9,22 +9,23 @@ // except according to those terms. use back::wasm; -use cc::windows_registry; use super::archive::{ArchiveBuilder, ArchiveConfig}; use super::bytecode::RLIB_BYTECODE_EXTENSION; -use super::linker::Linker; -use super::command::Command; +use rustc_codegen_ssa::back::linker::Linker; +use rustc_codegen_ssa::back::link::{remove, ignored_for_lto, each_linked_rlib, linker_and_flavor, + get_linker}; +use rustc_codegen_ssa::back::command::Command; use super::rpath::RPathConfig; use super::rpath; use metadata::METADATA_FILENAME; use rustc::session::config::{self, DebugInfo, OutputFilenames, OutputType, PrintRequest}; -use rustc::session::config::{RUST_CGU_EXT, Lto}; +use rustc::session::config::{RUST_CGU_EXT, Lto, Sanitizer}; use rustc::session::filesearch; use rustc::session::search_paths::PathKind; use rustc::session::Session; -use rustc::middle::cstore::{NativeLibrary, LibSource, NativeLibraryKind}; +use rustc::middle::cstore::{NativeLibrary, NativeLibraryKind}; use rustc::middle::dependency_format::Linkage; -use {CodegenResults, CrateInfo}; +use rustc_codegen_ssa::CodegenResults; use rustc::util::common::time; use rustc_fs_util::fix_windows_verbatim_for_gcc; use rustc::hir::def_id::CrateNum; @@ -47,75 +48,9 @@ use std::str; use syntax::attr; pub use rustc_codegen_utils::link::{find_crate_name, filename_for_input, default_output_for_target, - invalid_output_for_target, out_filename, check_file_is_writeable, - filename_for_metadata}; + invalid_output_for_target, filename_for_metadata, + out_filename, check_file_is_writeable}; -// The third parameter is for env vars, used on windows to set up the -// path for MSVC to find its DLLs, and gcc to find its bundled -// toolchain -pub fn get_linker(sess: &Session, linker: &Path, flavor: LinkerFlavor) -> (PathBuf, Command) { - let msvc_tool = windows_registry::find_tool(&sess.opts.target_triple.triple(), "link.exe"); - - // If our linker looks like a batch script on Windows then to execute this - // we'll need to spawn `cmd` explicitly. This is primarily done to handle - // emscripten where the linker is `emcc.bat` and needs to be spawned as - // `cmd /c emcc.bat ...`. - // - // This worked historically but is needed manually since #42436 (regression - // was tagged as #42791) and some more info can be found on #44443 for - // emscripten itself. - let mut cmd = match linker.to_str() { - Some(linker) if cfg!(windows) && linker.ends_with(".bat") => Command::bat_script(linker), - _ => match flavor { - LinkerFlavor::Lld(f) => Command::lld(linker, f), - LinkerFlavor::Msvc - if sess.opts.cg.linker.is_none() && sess.target.target.options.linker.is_none() => - { - Command::new(msvc_tool.as_ref().map(|t| t.path()).unwrap_or(linker)) - }, - _ => Command::new(linker), - } - }; - - // The compiler's sysroot often has some bundled tools, so add it to the - // PATH for the child. 
- let mut new_path = sess.host_filesearch(PathKind::All) - .get_tools_search_paths(); - let mut msvc_changed_path = false; - if sess.target.target.options.is_like_msvc { - if let Some(ref tool) = msvc_tool { - cmd.args(tool.args()); - for &(ref k, ref v) in tool.env() { - if k == "PATH" { - new_path.extend(env::split_paths(v)); - msvc_changed_path = true; - } else { - cmd.env(k, v); - } - } - } - } - - if !msvc_changed_path { - if let Some(path) = env::var_os("PATH") { - new_path.extend(env::split_paths(&path)); - } - } - cmd.env("PATH", env::join_paths(new_path).unwrap()); - - (linker.to_path_buf(), cmd) -} - -pub fn remove(sess: &Session, path: &Path) { - match fs::remove_file(path) { - Ok(..) => {} - Err(e) => { - sess.err(&format!("failed to remove {}: {}", - path.display(), - e)); - } - } -} /// Perform the linkage portion of the compilation phase. This will generate all /// of the requested outputs for this compilation session. @@ -137,19 +72,17 @@ pub(crate) fn link_binary(sess: &Session, bug!("invalid output type `{:?}` for target os `{}`", crate_type, sess.opts.target_triple); } - let mut out_files = link_binary_output(sess, - codegen_results, - crate_type, - outputs, - crate_name); - out_filenames.append(&mut out_files); + let out_files = link_binary_output(sess, + codegen_results, + crate_type, + outputs, + crate_name); + out_filenames.extend(out_files); } // Remove the temporary object file and metadata if we aren't saving temps if !sess.opts.cg.save_temps { - if sess.opts.output_types.should_codegen() && - !preserve_objects_for_their_debuginfo(sess) - { + if sess.opts.output_types.should_codegen() && !preserve_objects_for_their_debuginfo(sess) { for obj in codegen_results.modules.iter().filter_map(|m| m.object.as_ref()) { remove(sess, obj); } @@ -186,7 +119,7 @@ fn preserve_objects_for_their_debuginfo(sess: &Session) -> bool { // the objects as they're losslessly contained inside the archives. let output_linked = sess.crate_types.borrow() .iter() - .any(|x| *x != config::CrateType::Rlib && *x != config::CrateType::Staticlib); + .any(|&x| x != config::CrateType::Rlib && x != config::CrateType::Staticlib); if !output_linked { return false } @@ -219,60 +152,6 @@ fn preserve_objects_for_their_debuginfo(sess: &Session) -> bool { false } -pub(crate) fn each_linked_rlib(sess: &Session, - info: &CrateInfo, - f: &mut dyn FnMut(CrateNum, &Path)) -> Result<(), String> { - let crates = info.used_crates_static.iter(); - let fmts = sess.dependency_formats.borrow(); - let fmts = fmts.get(&config::CrateType::Executable) - .or_else(|| fmts.get(&config::CrateType::Staticlib)) - .or_else(|| fmts.get(&config::CrateType::Cdylib)) - .or_else(|| fmts.get(&config::CrateType::ProcMacro)); - let fmts = match fmts { - Some(f) => f, - None => return Err("could not find formats for rlibs".to_string()) - }; - for &(cnum, ref path) in crates { - match fmts.get(cnum.as_usize() - 1) { - Some(&Linkage::NotLinked) | - Some(&Linkage::IncludedFromDylib) => continue, - Some(_) => {} - None => return Err("could not find formats for rlibs".to_string()) - } - let name = &info.crate_name[&cnum]; - let path = match *path { - LibSource::Some(ref p) => p, - LibSource::MetadataOnly => { - return Err(format!("could not find rlib for: `{}`, found rmeta (metadata) file", - name)) - } - LibSource::None => { - return Err(format!("could not find rlib for: `{}`", name)) - } - }; - f(cnum, &path); - } - Ok(()) -} - -/// Returns a boolean indicating whether the specified crate should be ignored -/// during LTO. 
-/// -/// Crates ignored during LTO are not lumped together in the "massive object -/// file" that we create and are linked in their normal rlib states. See -/// comments below for what crates do not participate in LTO. -/// -/// It's unusual for a crate to not participate in LTO. Typically only -/// compiler-specific and unstable crates have a reason to not participate in -/// LTO. -pub(crate) fn ignored_for_lto(sess: &Session, info: &CrateInfo, cnum: CrateNum) -> bool { - // If our target enables builtin function lowering in LLVM then the - // crates providing these functions don't participate in LTO (e.g. - // no_builtins or compiler builtins crates). - !sess.target.target.options.no_builtins && - (info.is_no_builtins.contains(&cnum) || info.compiler_builtins == Some(cnum)) -} - fn link_binary_output(sess: &Session, codegen_results: &CodegenResults, crate_type: config::CrateType, @@ -291,13 +170,10 @@ fn link_binary_output(sess: &Session, // final destination, with a `fs::rename` call. In order for the rename to // always succeed, the temporary file needs to be on the same filesystem, // which is why we create it inside the output directory specifically. - let metadata_tmpdir = match TempFileBuilder::new() + let metadata_tmpdir = TempFileBuilder::new() .prefix("rmeta") .tempdir_in(out_filename.parent().unwrap()) - { - Ok(tmpdir) => tmpdir, - Err(err) => sess.fatal(&format!("couldn't create a temp dir: {}", err)), - }; + .unwrap_or_else(|err| sess.fatal(&format!("couldn't create a temp dir: {}", err))); let metadata = emit_metadata(sess, codegen_results, &metadata_tmpdir); if let Err(e) = fs::rename(metadata, &out_filename) { sess.fatal(&format!("failed to write {}: {}", out_filename.display(), e)); @@ -305,10 +181,8 @@ fn link_binary_output(sess: &Session, out_filenames.push(out_filename); } - let tmpdir = match TempFileBuilder::new().prefix("rustc").tempdir() { - Ok(tmpdir) => tmpdir, - Err(err) => sess.fatal(&format!("couldn't create a temp dir: {}", err)), - }; + let tmpdir = TempFileBuilder::new().prefix("rustc").tempdir().unwrap_or_else(|err| + sess.fatal(&format!("couldn't create a temp dir: {}", err))); if outputs.outputs.should_codegen() { let out_filename = out_filename(sess, crate_type, outputs, crate_name); @@ -342,7 +216,8 @@ fn archive_search_paths(sess: &Session) -> Vec { sess.target_filesearch(PathKind::Native).for_each_lib_search_path(|path, _| { search.push(path.to_path_buf()); }); - return search; + + search } fn archive_config<'a>(sess: &'a Session, @@ -361,8 +236,11 @@ fn archive_config<'a>(sess: &'a Session, /// building an `.rlib` (stomping over one another), or writing an `.rmeta` into a /// directory being searched for `extern crate` (observing an incomplete file). /// The returned path is the temporary file containing the complete metadata. 
-fn emit_metadata<'a>(sess: &'a Session, codegen_results: &CodegenResults, tmpdir: &TempDir) - -> PathBuf { +fn emit_metadata<'a>( + sess: &'a Session, + codegen_results: &CodegenResults, + tmpdir: &TempDir +) -> PathBuf { let out_filename = tmpdir.path().join(METADATA_FILENAME); let result = fs::write(&out_filename, &codegen_results.metadata.raw_data); @@ -584,69 +462,6 @@ fn print_native_static_libs(sess: &Session, all_native_libs: &[NativeLibrary]) { } } -pub fn linker_and_flavor(sess: &Session) -> (PathBuf, LinkerFlavor) { - fn infer_from( - sess: &Session, - linker: Option, - flavor: Option, - ) -> Option<(PathBuf, LinkerFlavor)> { - match (linker, flavor) { - (Some(linker), Some(flavor)) => Some((linker, flavor)), - // only the linker flavor is known; use the default linker for the selected flavor - (None, Some(flavor)) => Some((PathBuf::from(match flavor { - LinkerFlavor::Em => if cfg!(windows) { "emcc.bat" } else { "emcc" }, - LinkerFlavor::Gcc => "cc", - LinkerFlavor::Ld => "ld", - LinkerFlavor::Msvc => "link.exe", - LinkerFlavor::Lld(_) => "lld", - }), flavor)), - (Some(linker), None) => { - let stem = linker.file_stem().and_then(|stem| stem.to_str()).unwrap_or_else(|| { - sess.fatal("couldn't extract file stem from specified linker"); - }).to_owned(); - - let flavor = if stem == "emcc" { - LinkerFlavor::Em - } else if stem == "gcc" || stem.ends_with("-gcc") { - LinkerFlavor::Gcc - } else if stem == "ld" || stem == "ld.lld" || stem.ends_with("-ld") { - LinkerFlavor::Ld - } else if stem == "link" || stem == "lld-link" { - LinkerFlavor::Msvc - } else if stem == "lld" || stem == "rust-lld" { - LinkerFlavor::Lld(sess.target.target.options.lld_flavor) - } else { - // fall back to the value in the target spec - sess.target.target.linker_flavor - }; - - Some((linker, flavor)) - }, - (None, None) => None, - } - } - - // linker and linker flavor specified via command line have precedence over what the target - // specification specifies - if let Some(ret) = infer_from( - sess, - sess.opts.cg.linker.clone(), - sess.opts.debugging_opts.linker_flavor, - ) { - return ret; - } - - if let Some(ret) = infer_from( - sess, - sess.target.target.options.linker.clone().map(PathBuf::from), - Some(sess.target.target.linker_flavor), - ) { - return ret; - } - - bug!("Not enough information provided to determine how to invoke the linker"); -} - // Create a dynamic library or executable // // This will invoke the system linker/cc to create the resulting file. 
This @@ -676,6 +491,14 @@ fn link_natively(sess: &Session, } cmd.args(&sess.opts.debugging_opts.pre_link_arg); + if sess.target.target.options.is_like_fuchsia { + let prefix = match sess.opts.debugging_opts.sanitizer { + Some(Sanitizer::Address) => "asan/", + _ => "", + }; + cmd.arg(format!("--dynamic-linker={}ld.so.1", prefix)); + } + let pre_link_objects = if crate_type == config::CrateType::Executable { &sess.target.target.options.pre_link_objects_exe } else { @@ -701,7 +524,8 @@ fn link_natively(sess: &Session, } { - let mut linker = codegen_results.linker_info.to_linker(cmd, &sess, flavor); + let target_cpu = ::llvm_util::target_cpu(sess); + let mut linker = codegen_results.linker_info.to_linker(cmd, &sess, flavor, target_cpu); link_args(&mut *linker, flavor, sess, crate_type, tmpdir, out_filename, codegen_results); cmd = linker.finalize(); @@ -813,8 +637,8 @@ fn link_natively(sess: &Session, .unwrap_or_else(|_| { let mut x = "Non-UTF-8 output: ".to_string(); x.extend(s.iter() - .flat_map(|&b| ascii::escape_default(b)) - .map(|b| char::from_u32(b as u32).unwrap())); + .flat_map(|&b| ascii::escape_default(b)) + .map(char::from)); x }) } @@ -869,14 +693,18 @@ fn link_natively(sess: &Session, sess.opts.debuginfo != DebugInfo::None && !preserve_objects_for_their_debuginfo(sess) { - match Command::new("dsymutil").arg(out_filename).output() { - Ok(..) => {} - Err(e) => sess.fatal(&format!("failed to run dsymutil: {}", e)), + if let Err(e) = Command::new("dsymutil").arg(out_filename).output() { + sess.fatal(&format!("failed to run dsymutil: {}", e)) } } if sess.opts.target_triple.triple() == "wasm32-unknown-unknown" { wasm::rewrite_imports(&out_filename, &codegen_results.crate_info.wasm_imports); + wasm::add_producer_section( + &out_filename, + &sess.edition().to_string(), + option_env!("CFG_VERSION").unwrap_or("unknown"), + ); } } @@ -1011,8 +839,7 @@ fn exec_linker(sess: &Session, cmd: &mut Command, out_filename: &Path, tmpdir: & // ensure the line is interpreted as one whole argument. for c in self.arg.chars() { match c { - '\\' | - ' ' => write!(f, "\\{}", c)?, + '\\' | ' ' => write!(f, "\\{}", c)?, c => write!(f, "{}", c)?, } } @@ -1425,7 +1252,6 @@ fn add_upstream_rust_crates(cmd: &mut dyn Linker, for f in archive.src_files() { if f.ends_with(RLIB_BYTECODE_EXTENSION) || f == METADATA_FILENAME { archive.remove_file(&f); - continue } } diff --git a/src/librustc_codegen_llvm/back/lto.rs b/src/librustc_codegen_llvm/back/lto.rs index 61856236a1..99828e5b7f 100644 --- a/src/librustc_codegen_llvm/back/lto.rs +++ b/src/librustc_codegen_llvm/back/lto.rs @@ -9,14 +9,14 @@ // except according to those terms. 
use back::bytecode::{DecodedBytecode, RLIB_BYTECODE_EXTENSION}; -use back::symbol_export; -use back::write::{ModuleConfig, with_llvm_pmb, CodegenContext}; -use back::write::{self, DiagnosticHandlers, pre_lto_bitcode_filename}; +use rustc_codegen_ssa::back::symbol_export; +use rustc_codegen_ssa::back::write::{ModuleConfig, CodegenContext, pre_lto_bitcode_filename}; +use rustc_codegen_ssa::back::lto::{SerializedModule, LtoModuleCodegen, ThinShared, ThinModule}; +use rustc_codegen_ssa::traits::*; +use back::write::{self, DiagnosticHandlers, with_llvm_pmb, save_temp_bitcode, get_llvm_opt_level}; use errors::{FatalError, Handler}; use llvm::archive_ro::ArchiveRO; -use llvm::{True, False}; -use llvm; -use memmap; +use llvm::{self, True, False}; use rustc::dep_graph::WorkProduct; use rustc::dep_graph::cgu_reuse_tracker::CguReuse; use rustc::hir::def_id::LOCAL_CRATE; @@ -25,7 +25,8 @@ use rustc::session::config::{self, Lto}; use rustc::util::common::time_ext; use rustc_data_structures::fx::FxHashMap; use time_graph::Timeline; -use {ModuleCodegen, ModuleLlvm, ModuleKind}; +use {ModuleLlvm, LlvmCodegenBackend}; +use rustc_codegen_ssa::{ModuleCodegen, ModuleKind}; use libc; @@ -47,71 +48,16 @@ pub fn crate_type_allows_lto(crate_type: config::CrateType) -> bool { } } -pub(crate) enum LtoModuleCodegen { - Fat { - module: Option, - _serialized_bitcode: Vec, - }, - - Thin(ThinModule), -} - -impl LtoModuleCodegen { - pub fn name(&self) -> &str { - match *self { - LtoModuleCodegen::Fat { .. } => "everything", - LtoModuleCodegen::Thin(ref m) => m.name(), - } - } - - /// Optimize this module within the given codegen context. - /// - /// This function is unsafe as it'll return a `ModuleCodegen` still - /// points to LLVM data structures owned by this `LtoModuleCodegen`. - /// It's intended that the module returned is immediately code generated and - /// dropped, and then this LTO module is dropped. - pub(crate) unsafe fn optimize(&mut self, - cgcx: &CodegenContext, - timeline: &mut Timeline) - -> Result - { - match *self { - LtoModuleCodegen::Fat { ref mut module, .. } => { - let module = module.take().unwrap(); - { - let config = cgcx.config(module.kind); - let llmod = module.module_llvm.llmod(); - let tm = &*module.module_llvm.tm; - run_pass_manager(cgcx, tm, llmod, config, false); - timeline.record("fat-done"); - } - Ok(module) - } - LtoModuleCodegen::Thin(ref mut thin) => thin.optimize(cgcx, timeline), - } - } - - /// A "gauge" of how costly it is to optimize this module, used to sort - /// biggest modules first. - pub fn cost(&self) -> u64 { - match *self { - // Only one module with fat LTO, so the cost doesn't matter. - LtoModuleCodegen::Fat { .. } => 0, - LtoModuleCodegen::Thin(ref m) => m.cost(), - } - } -} - /// Performs LTO, which in the case of full LTO means merging all modules into /// a single one and returning it for further optimizing. For ThinLTO, it will /// do the global analysis necessary and return two lists, one of the modules /// the need optimization and another for modules that can simply be copied over /// from the incr. comp. cache. 
-pub(crate) fn run(cgcx: &CodegenContext, - modules: Vec, - cached_modules: Vec<(SerializedModule, WorkProduct)>, +pub(crate) fn run(cgcx: &CodegenContext, + modules: Vec>, + cached_modules: Vec<(SerializedModule, WorkProduct)>, timeline: &mut Timeline) - -> Result<(Vec, Vec), FatalError> + -> Result<(Vec>, Vec), FatalError> { let diag_handler = cgcx.create_diag_handler(); let export_threshold = match cgcx.lto { @@ -205,11 +151,11 @@ pub(crate) fn run(cgcx: &CodegenContext, Lto::Fat => { assert!(cached_modules.is_empty()); let opt_jobs = fat_lto(cgcx, - &diag_handler, - modules, - upstream_modules, - &symbol_white_list, - timeline); + &diag_handler, + modules, + upstream_modules, + &symbol_white_list, + timeline); opt_jobs.map(|opt_jobs| (opt_jobs, vec![])) } Lto::Thin | @@ -230,13 +176,13 @@ pub(crate) fn run(cgcx: &CodegenContext, } } -fn fat_lto(cgcx: &CodegenContext, +fn fat_lto(cgcx: &CodegenContext, diag_handler: &Handler, - mut modules: Vec, - mut serialized_modules: Vec<(SerializedModule, CString)>, + mut modules: Vec>, + mut serialized_modules: Vec<(SerializedModule, CString)>, symbol_white_list: &[*const libc::c_char], timeline: &mut Timeline) - -> Result, FatalError> + -> Result>, FatalError> { info!("going for a fat lto"); @@ -279,11 +225,12 @@ fn fat_lto(cgcx: &CodegenContext, // and we want to move everything to the same LLVM context. Currently the // way we know of to do that is to serialize them to a string and them parse // them later. Not great but hey, that's why it's "fat" LTO, right? - for module in modules { + serialized_modules.extend(modules.into_iter().map(|module| { let buffer = ModuleBuffer::new(module.module_llvm.llmod()); let llmod_id = CString::new(&module.name[..]).unwrap(); - serialized_modules.push((SerializedModule::Local(buffer), llmod_id)); - } + + (SerializedModule::Local(buffer), llmod_id) + })); // For all serialized bitcode files we parse them and link them in as we did // above, this is all mostly handled in C++. Like above, though, we don't @@ -296,30 +243,30 @@ fn fat_lto(cgcx: &CodegenContext, let data = bc_decoded.data(); linker.add(&data).map_err(|()| { let msg = format!("failed to load bc of {:?}", name); - write::llvm_err(&diag_handler, msg) + write::llvm_err(&diag_handler, &msg) }) })?; timeline.record(&format!("link {:?}", name)); serialized_bitcode.push(bc_decoded); } drop(linker); - cgcx.save_temp_bitcode(&module, "lto.input"); + save_temp_bitcode(&cgcx, &module, "lto.input"); // Internalize everything that *isn't* in our whitelist to help strip out // more modules and such unsafe { let ptr = symbol_white_list.as_ptr(); llvm::LLVMRustRunRestrictionPass(llmod, - ptr as *const *const libc::c_char, - symbol_white_list.len() as libc::size_t); - cgcx.save_temp_bitcode(&module, "lto.after-restriction"); + ptr as *const *const libc::c_char, + symbol_white_list.len() as libc::size_t); + save_temp_bitcode(&cgcx, &module, "lto.after-restriction"); } if cgcx.no_landing_pads { unsafe { llvm::LLVMRustMarkAllFunctionsNounwind(llmod); } - cgcx.save_temp_bitcode(&module, "lto.after-nounwind"); + save_temp_bitcode(&cgcx, &module, "lto.after-nounwind"); } timeline.record("passes"); } @@ -386,14 +333,14 @@ impl Drop for Linker<'a> { /// calculating the *index* for ThinLTO. This index will then be shared amongst /// all of the `LtoModuleCodegen` units returned below and destroyed once /// they all go out of scope. 
-fn thin_lto(cgcx: &CodegenContext, +fn thin_lto(cgcx: &CodegenContext, diag_handler: &Handler, - modules: Vec, - serialized_modules: Vec<(SerializedModule, CString)>, - cached_modules: Vec<(SerializedModule, WorkProduct)>, + modules: Vec>, + serialized_modules: Vec<(SerializedModule, CString)>, + cached_modules: Vec<(SerializedModule, WorkProduct)>, symbol_white_list: &[*const libc::c_char], timeline: &mut Timeline) - -> Result<(Vec, Vec), FatalError> + -> Result<(Vec>, Vec), FatalError> { unsafe { info!("going for that thin, thin LTO"); @@ -403,9 +350,10 @@ fn thin_lto(cgcx: &CodegenContext, .map(|&(_, ref wp)| (wp.cgu_name.clone(), wp.clone())) .collect(); - let mut thin_buffers = Vec::new(); - let mut module_names = Vec::new(); - let mut thin_modules = Vec::new(); + let full_scope_len = modules.len() + serialized_modules.len() + cached_modules.len(); + let mut thin_buffers = Vec::with_capacity(modules.len()); + let mut module_names = Vec::with_capacity(full_scope_len); + let mut thin_modules = Vec::with_capacity(full_scope_len); // FIXME: right now, like with fat LTO, we serialize all in-memory // modules before working with them and ThinLTO. We really @@ -414,7 +362,7 @@ fn thin_lto(cgcx: &CodegenContext, // into the global index. It turns out that this loop is by far // the most expensive portion of this small bit of global // analysis! - for (i, module) in modules.iter().enumerate() { + for (i, module) in modules.into_iter().enumerate() { info!("local module: {} - {}", i, module.name); let name = CString::new(module.name.clone()).unwrap(); let buffer = ThinBuffer::new(module.module_llvm.llmod()); @@ -460,7 +408,7 @@ fn thin_lto(cgcx: &CodegenContext, // incremental ThinLTO first where we could actually avoid // looking at upstream modules entirely sometimes (the contents, // we must always unconditionally look at the index). - let mut serialized = Vec::new(); + let mut serialized = Vec::with_capacity(serialized_modules.len() + cached_modules.len()); let cached_modules = cached_modules.into_iter().map(|(sm, wp)| { (sm, CString::new(wp.cgu_name).unwrap()) @@ -490,7 +438,7 @@ fn thin_lto(cgcx: &CodegenContext, symbol_white_list.as_ptr(), symbol_white_list.len() as u32, ).ok_or_else(|| { - write::llvm_err(&diag_handler, "failed to prepare thin LTO context".to_string()) + write::llvm_err(&diag_handler, "failed to prepare thin LTO context") })?; info!("thin LTO data created"); @@ -556,9 +504,8 @@ fn thin_lto(cgcx: &CodegenContext, } } -fn run_pass_manager(cgcx: &CodegenContext, - tm: &llvm::TargetMachine, - llmod: &llvm::Module, +pub(crate) fn run_pass_manager(cgcx: &CodegenContext, + module: &ModuleCodegen, config: &ModuleConfig, thin: bool) { // Now we have one massive module inside of llmod. Time to run the @@ -569,7 +516,7 @@ fn run_pass_manager(cgcx: &CodegenContext, debug!("running the pass manager"); unsafe { let pm = llvm::LLVMCreatePassManager(); - llvm::LLVMRustAddAnalysisPasses(tm, pm, llmod); + llvm::LLVMRustAddAnalysisPasses(module.module_llvm.tm, pm, module.module_llvm.llmod()); if config.verify_llvm_ir { let pass = llvm::LLVMRustFindAndCreatePass("verify\0".as_ptr() as *const _); @@ -588,16 +535,15 @@ fn run_pass_manager(cgcx: &CodegenContext, // Note that in general this shouldn't matter too much as you typically // only turn on ThinLTO when you're compiling with optimizations // otherwise. 
- let opt_level = config.opt_level.unwrap_or(llvm::CodeGenOptLevel::None); + let opt_level = config.opt_level.map(get_llvm_opt_level) + .unwrap_or(llvm::CodeGenOptLevel::None); let opt_level = match opt_level { llvm::CodeGenOptLevel::None => llvm::CodeGenOptLevel::Less, level => level, }; - with_llvm_pmb(llmod, config, opt_level, false, &mut |b| { + with_llvm_pmb(module.module_llvm.llmod(), config, opt_level, false, &mut |b| { if thin { - if !llvm::LLVMRustPassManagerBuilderPopulateThinLTOPassManager(b, pm) { - panic!("this version of LLVM does not support ThinLTO"); - } + llvm::LLVMRustPassManagerBuilderPopulateThinLTOPassManager(b, pm); } else { llvm::LLVMPassManagerBuilderPopulateLTOPassManager(b, pm, /* Internalize = */ False, @@ -605,35 +551,26 @@ fn run_pass_manager(cgcx: &CodegenContext, } }); + // We always generate bitcode through ThinLTOBuffers, + // which do not support anonymous globals + if config.bitcode_needed() { + let pass = llvm::LLVMRustFindAndCreatePass("name-anon-globals\0".as_ptr() as *const _); + llvm::LLVMRustAddPass(pm, pass.unwrap()); + } + if config.verify_llvm_ir { let pass = llvm::LLVMRustFindAndCreatePass("verify\0".as_ptr() as *const _); llvm::LLVMRustAddPass(pm, pass.unwrap()); } time_ext(cgcx.time_passes, None, "LTO passes", || - llvm::LLVMRunPassManager(pm, llmod)); + llvm::LLVMRunPassManager(pm, module.module_llvm.llmod())); llvm::LLVMDisposePassManager(pm); } debug!("lto done"); } -pub enum SerializedModule { - Local(ModuleBuffer), - FromRlib(Vec), - FromUncompressedFile(memmap::Mmap), -} - -impl SerializedModule { - fn data(&self) -> &[u8] { - match *self { - SerializedModule::Local(ref m) => m.data(), - SerializedModule::FromRlib(ref m) => m, - SerializedModule::FromUncompressedFile(ref m) => m, - } - } -} - pub struct ModuleBuffer(&'static mut llvm::ModuleBuffer); unsafe impl Send for ModuleBuffer {} @@ -645,8 +582,10 @@ impl ModuleBuffer { llvm::LLVMRustModuleBufferCreate(m) }) } +} - pub fn data(&self) -> &[u8] { +impl ModuleBufferMethods for ModuleBuffer { + fn data(&self) -> &[u8] { unsafe { let ptr = llvm::LLVMRustModuleBufferPtr(self.0); let len = llvm::LLVMRustModuleBufferLen(self.0); @@ -661,19 +600,7 @@ impl Drop for ModuleBuffer { } } -pub struct ThinModule { - shared: Arc, - idx: usize, -} - -struct ThinShared { - data: ThinData, - thin_buffers: Vec, - serialized_modules: Vec, - module_names: Vec, -} - -struct ThinData(&'static mut llvm::ThinLTOData); +pub struct ThinData(&'static mut llvm::ThinLTOData); unsafe impl Send for ThinData {} unsafe impl Sync for ThinData {} @@ -698,8 +625,10 @@ impl ThinBuffer { ThinBuffer(buffer) } } +} - pub fn data(&self) -> &[u8] { +impl ThinBufferMethods for ThinBuffer { + fn data(&self) -> &[u8] { unsafe { let ptr = llvm::LLVMRustThinLTOBufferPtr(self.0) as *const _; let len = llvm::LLVMRustThinLTOBufferLen(self.0); @@ -716,161 +645,142 @@ impl Drop for ThinBuffer { } } -impl ThinModule { - fn name(&self) -> &str { - self.shared.module_names[self.idx].to_str().unwrap() - } +pub unsafe fn optimize_thin_module( + thin_module: &mut ThinModule, + cgcx: &CodegenContext, + timeline: &mut Timeline +) -> Result, FatalError> { + let diag_handler = cgcx.create_diag_handler(); + let tm = (cgcx.tm_factory.0)().map_err(|e| { + write::llvm_err(&diag_handler, &e) + })?; - fn cost(&self) -> u64 { - // Yes, that's correct, we're using the size of the bytecode as an - // indicator for how costly this codegen unit is. 
- self.data().len() as u64 - } - - fn data(&self) -> &[u8] { - let a = self.shared.thin_buffers.get(self.idx).map(|b| b.data()); - a.unwrap_or_else(|| { - let len = self.shared.thin_buffers.len(); - self.shared.serialized_modules[self.idx - len].data() - }) - } - - unsafe fn optimize(&mut self, cgcx: &CodegenContext, timeline: &mut Timeline) - -> Result - { - let diag_handler = cgcx.create_diag_handler(); - let tm = (cgcx.tm_factory)().map_err(|e| { - write::llvm_err(&diag_handler, e) - })?; - - // Right now the implementation we've got only works over serialized - // modules, so we create a fresh new LLVM context and parse the module - // into that context. One day, however, we may do this for upstream - // crates but for locally codegened modules we may be able to reuse - // that LLVM Context and Module. - let llcx = llvm::LLVMRustContextCreate(cgcx.fewer_names); - let llmod_raw = llvm::LLVMRustParseBitcodeForThinLTO( + // Right now the implementation we've got only works over serialized + // modules, so we create a fresh new LLVM context and parse the module + // into that context. One day, however, we may do this for upstream + // crates but for locally codegened modules we may be able to reuse + // that LLVM Context and Module. + let llcx = llvm::LLVMRustContextCreate(cgcx.fewer_names); + let llmod_raw = llvm::LLVMRustParseBitcodeForThinLTO( + llcx, + thin_module.data().as_ptr(), + thin_module.data().len(), + thin_module.shared.module_names[thin_module.idx].as_ptr(), + ).ok_or_else(|| { + let msg = "failed to parse bitcode for thin LTO module"; + write::llvm_err(&diag_handler, msg) + })? as *const _; + let module = ModuleCodegen { + module_llvm: ModuleLlvm { + llmod_raw, llcx, - self.data().as_ptr(), - self.data().len(), - self.shared.module_names[self.idx].as_ptr(), - ).ok_or_else(|| { - let msg = "failed to parse bitcode for thin LTO module".to_string(); - write::llvm_err(&diag_handler, msg) - })? as *const _; - let module = ModuleCodegen { - module_llvm: ModuleLlvm { - llmod_raw, - llcx, - tm, - }, - name: self.name().to_string(), - kind: ModuleKind::Regular, - }; - { - let llmod = module.module_llvm.llmod(); - cgcx.save_temp_bitcode(&module, "thin-lto-input"); + tm, + }, + name: thin_module.name().to_string(), + kind: ModuleKind::Regular, + }; + { + let llmod = module.module_llvm.llmod(); + save_temp_bitcode(&cgcx, &module, "thin-lto-input"); - // Before we do much else find the "main" `DICompileUnit` that we'll be - // using below. If we find more than one though then rustc has changed - // in a way we're not ready for, so generate an ICE by returning - // an error. - let mut cu1 = ptr::null_mut(); - let mut cu2 = ptr::null_mut(); - llvm::LLVMRustThinLTOGetDICompileUnit(llmod, &mut cu1, &mut cu2); - if !cu2.is_null() { - let msg = "multiple source DICompileUnits found".to_string(); - return Err(write::llvm_err(&diag_handler, msg)) - } - - // Like with "fat" LTO, get some better optimizations if landing pads - // are disabled by removing all landing pads. - if cgcx.no_landing_pads { - llvm::LLVMRustMarkAllFunctionsNounwind(llmod); - cgcx.save_temp_bitcode(&module, "thin-lto-after-nounwind"); - timeline.record("nounwind"); - } - - // Up next comes the per-module local analyses that we do for Thin LTO. - // Each of these functions is basically copied from the LLVM - // implementation and then tailored to suit this implementation. Ideally - // each of these would be supported by upstream LLVM but that's perhaps - // a patch for another day! 
- // - // You can find some more comments about these functions in the LLVM - // bindings we've got (currently `PassWrapper.cpp`) - if !llvm::LLVMRustPrepareThinLTORename(self.shared.data.0, llmod) { - let msg = "failed to prepare thin LTO module".to_string(); - return Err(write::llvm_err(&diag_handler, msg)) - } - cgcx.save_temp_bitcode(&module, "thin-lto-after-rename"); - timeline.record("rename"); - if !llvm::LLVMRustPrepareThinLTOResolveWeak(self.shared.data.0, llmod) { - let msg = "failed to prepare thin LTO module".to_string(); - return Err(write::llvm_err(&diag_handler, msg)) - } - cgcx.save_temp_bitcode(&module, "thin-lto-after-resolve"); - timeline.record("resolve"); - if !llvm::LLVMRustPrepareThinLTOInternalize(self.shared.data.0, llmod) { - let msg = "failed to prepare thin LTO module".to_string(); - return Err(write::llvm_err(&diag_handler, msg)) - } - cgcx.save_temp_bitcode(&module, "thin-lto-after-internalize"); - timeline.record("internalize"); - if !llvm::LLVMRustPrepareThinLTOImport(self.shared.data.0, llmod) { - let msg = "failed to prepare thin LTO module".to_string(); - return Err(write::llvm_err(&diag_handler, msg)) - } - cgcx.save_temp_bitcode(&module, "thin-lto-after-import"); - timeline.record("import"); - - // Ok now this is a bit unfortunate. This is also something you won't - // find upstream in LLVM's ThinLTO passes! This is a hack for now to - // work around bugs in LLVM. - // - // First discovered in #45511 it was found that as part of ThinLTO - // importing passes LLVM will import `DICompileUnit` metadata - // information across modules. This means that we'll be working with one - // LLVM module that has multiple `DICompileUnit` instances in it (a - // bunch of `llvm.dbg.cu` members). Unfortunately there's a number of - // bugs in LLVM's backend which generates invalid DWARF in a situation - // like this: - // - // https://bugs.llvm.org/show_bug.cgi?id=35212 - // https://bugs.llvm.org/show_bug.cgi?id=35562 - // - // While the first bug there is fixed the second ended up causing #46346 - // which was basically a resurgence of #45511 after LLVM's bug 35212 was - // fixed. - // - // This function below is a huge hack around this problem. The function - // below is defined in `PassWrapper.cpp` and will basically "merge" - // all `DICompileUnit` instances in a module. Basically it'll take all - // the objects, rewrite all pointers of `DISubprogram` to point to the - // first `DICompileUnit`, and then delete all the other units. - // - // This is probably mangling to the debug info slightly (but hopefully - // not too much) but for now at least gets LLVM to emit valid DWARF (or - // so it appears). Hopefully we can remove this once upstream bugs are - // fixed in LLVM. - llvm::LLVMRustThinLTOPatchDICompileUnit(llmod, cu1); - cgcx.save_temp_bitcode(&module, "thin-lto-after-patch"); - timeline.record("patch"); - - // Alright now that we've done everything related to the ThinLTO - // analysis it's time to run some optimizations! Here we use the same - // `run_pass_manager` as the "fat" LTO above except that we tell it to - // populate a thin-specific pass manager, which presumably LLVM treats a - // little differently. - info!("running thin lto passes over {}", module.name); - let config = cgcx.config(module.kind); - run_pass_manager(cgcx, module.module_llvm.tm, llmod, config, true); - cgcx.save_temp_bitcode(&module, "thin-lto-after-pm"); - timeline.record("thin-done"); + // Before we do much else find the "main" `DICompileUnit` that we'll be + // using below. 
If we find more than one though then rustc has changed + // in a way we're not ready for, so generate an ICE by returning + // an error. + let mut cu1 = ptr::null_mut(); + let mut cu2 = ptr::null_mut(); + llvm::LLVMRustThinLTOGetDICompileUnit(llmod, &mut cu1, &mut cu2); + if !cu2.is_null() { + let msg = "multiple source DICompileUnits found"; + return Err(write::llvm_err(&diag_handler, msg)) } - Ok(module) + // Like with "fat" LTO, get some better optimizations if landing pads + // are disabled by removing all landing pads. + if cgcx.no_landing_pads { + llvm::LLVMRustMarkAllFunctionsNounwind(llmod); + save_temp_bitcode(&cgcx, &module, "thin-lto-after-nounwind"); + timeline.record("nounwind"); + } + + // Up next comes the per-module local analyses that we do for Thin LTO. + // Each of these functions is basically copied from the LLVM + // implementation and then tailored to suit this implementation. Ideally + // each of these would be supported by upstream LLVM but that's perhaps + // a patch for another day! + // + // You can find some more comments about these functions in the LLVM + // bindings we've got (currently `PassWrapper.cpp`) + if !llvm::LLVMRustPrepareThinLTORename(thin_module.shared.data.0, llmod) { + let msg = "failed to prepare thin LTO module"; + return Err(write::llvm_err(&diag_handler, msg)) + } + save_temp_bitcode(cgcx, &module, "thin-lto-after-rename"); + timeline.record("rename"); + if !llvm::LLVMRustPrepareThinLTOResolveWeak(thin_module.shared.data.0, llmod) { + let msg = "failed to prepare thin LTO module"; + return Err(write::llvm_err(&diag_handler, msg)) + } + save_temp_bitcode(cgcx, &module, "thin-lto-after-resolve"); + timeline.record("resolve"); + if !llvm::LLVMRustPrepareThinLTOInternalize(thin_module.shared.data.0, llmod) { + let msg = "failed to prepare thin LTO module"; + return Err(write::llvm_err(&diag_handler, msg)) + } + save_temp_bitcode(cgcx, &module, "thin-lto-after-internalize"); + timeline.record("internalize"); + if !llvm::LLVMRustPrepareThinLTOImport(thin_module.shared.data.0, llmod) { + let msg = "failed to prepare thin LTO module"; + return Err(write::llvm_err(&diag_handler, msg)) + } + save_temp_bitcode(cgcx, &module, "thin-lto-after-import"); + timeline.record("import"); + + // Ok now this is a bit unfortunate. This is also something you won't + // find upstream in LLVM's ThinLTO passes! This is a hack for now to + // work around bugs in LLVM. + // + // First discovered in #45511 it was found that as part of ThinLTO + // importing passes LLVM will import `DICompileUnit` metadata + // information across modules. This means that we'll be working with one + // LLVM module that has multiple `DICompileUnit` instances in it (a + // bunch of `llvm.dbg.cu` members). Unfortunately there's a number of + // bugs in LLVM's backend which generates invalid DWARF in a situation + // like this: + // + // https://bugs.llvm.org/show_bug.cgi?id=35212 + // https://bugs.llvm.org/show_bug.cgi?id=35562 + // + // While the first bug there is fixed the second ended up causing #46346 + // which was basically a resurgence of #45511 after LLVM's bug 35212 was + // fixed. + // + // This function below is a huge hack around this problem. The function + // below is defined in `PassWrapper.cpp` and will basically "merge" + // all `DICompileUnit` instances in a module. Basically it'll take all + // the objects, rewrite all pointers of `DISubprogram` to point to the + // first `DICompileUnit`, and then delete all the other units. 
+ // + // This is probably mangling to the debug info slightly (but hopefully + // not too much) but for now at least gets LLVM to emit valid DWARF (or + // so it appears). Hopefully we can remove this once upstream bugs are + // fixed in LLVM. + llvm::LLVMRustThinLTOPatchDICompileUnit(llmod, cu1); + save_temp_bitcode(cgcx, &module, "thin-lto-after-patch"); + timeline.record("patch"); + + // Alright now that we've done everything related to the ThinLTO + // analysis it's time to run some optimizations! Here we use the same + // `run_pass_manager` as the "fat" LTO above except that we tell it to + // populate a thin-specific pass manager, which presumably LLVM treats a + // little differently. + info!("running thin lto passes over {}", module.name); + let config = cgcx.config(module.kind); + run_pass_manager(cgcx, &module, config, true); + save_temp_bitcode(cgcx, &module, "thin-lto-after-pm"); + timeline.record("thin-done"); } + Ok(module) } #[derive(Debug, Default)] @@ -913,12 +823,6 @@ impl ThinLTOImports { } fn module_name_to_str(c_str: &CStr) -> &str { - match c_str.to_str() { - Ok(s) => s, - Err(e) => { - bug!("Encountered non-utf8 LLVM module name `{}`: {}", - c_str.to_string_lossy(), - e) - } - } + c_str.to_str().unwrap_or_else(|e| + bug!("Encountered non-utf8 LLVM module name `{}`: {}", c_str.to_string_lossy(), e)) } diff --git a/src/librustc_codegen_llvm/back/rpath.rs b/src/librustc_codegen_llvm/back/rpath.rs index 9609cb0c15..73a7366d0a 100644 --- a/src/librustc_codegen_llvm/back/rpath.rs +++ b/src/librustc_codegen_llvm/back/rpath.rs @@ -31,25 +31,24 @@ pub fn get_rpath_flags(config: &mut RPathConfig) -> Vec { return Vec::new(); } - let mut flags = Vec::new(); - debug!("preparing the RPATH!"); let libs = config.used_crates.clone(); let libs = libs.iter().filter_map(|&(_, ref l)| l.option()).collect::>(); let rpaths = get_rpaths(config, &libs); - flags.extend_from_slice(&rpaths_to_flags(&rpaths)); + let mut flags = rpaths_to_flags(&rpaths); // Use DT_RUNPATH instead of DT_RPATH if available if config.linker_is_gnu { - flags.push("-Wl,--enable-new-dtags".to_string()); + flags.push("-Wl,--enable-new-dtags".to_owned()); } flags } fn rpaths_to_flags(rpaths: &[String]) -> Vec { - let mut ret = Vec::new(); + let mut ret = Vec::with_capacity(rpaths.len()); // the minimum needed capacity + for rpath in rpaths { if rpath.contains(',') { ret.push("-Wl,-rpath".into()); @@ -59,7 +58,8 @@ fn rpaths_to_flags(rpaths: &[String]) -> Vec { ret.push(format!("-Wl,-rpath,{}", &(*rpath))); } } - return ret; + + ret } fn get_rpaths(config: &mut RPathConfig, libs: &[PathBuf]) -> Vec { @@ -92,7 +92,8 @@ fn get_rpaths(config: &mut RPathConfig, libs: &[PathBuf]) -> Vec { // Remove duplicates let rpaths = minimize_rpaths(&rpaths); - return rpaths; + + rpaths } fn get_rpaths_relative_to_output(config: &mut RPathConfig, @@ -117,8 +118,7 @@ fn get_rpath_relative_to_output(config: &mut RPathConfig, lib: &Path) -> String let relative = path_relative_from(&lib, &output).unwrap_or_else(|| panic!("couldn't create relative path from {:?} to {:?}", output, lib)); // FIXME (#9639): This needs to handle non-utf8 paths - format!("{}/{}", prefix, - relative.to_str().expect("non-utf8 component in path")) + format!("{}/{}", prefix, relative.to_str().expect("non-utf8 component in path")) } // This routine is adapted from the *old* Path's `path_relative_from` @@ -168,7 +168,7 @@ fn get_install_prefix_rpath(config: &mut RPathConfig) -> String { let path = (config.get_install_prefix_lib_path)(); let path = 
env::current_dir().unwrap().join(&path); // FIXME (#9639): This needs to handle non-utf8 paths - path.to_str().expect("non-utf8 component in rpath").to_string() + path.to_str().expect("non-utf8 component in rpath").to_owned() } fn minimize_rpaths(rpaths: &[String]) -> Vec { diff --git a/src/librustc_codegen_llvm/back/wasm.rs b/src/librustc_codegen_llvm/back/wasm.rs index f37854b7bc..1a5c65f3c4 100644 --- a/src/librustc_codegen_llvm/back/wasm.rs +++ b/src/librustc_codegen_llvm/back/wasm.rs @@ -17,6 +17,7 @@ use serialize::leb128; // https://webassembly.github.io/spec/core/binary/modules.html#binary-importsec const WASM_IMPORT_SECTION_ID: u8 = 2; +const WASM_CUSTOM_SECTION_ID: u8 = 0; const WASM_EXTERNAL_KIND_FUNCTION: u8 = 0; const WASM_EXTERNAL_KIND_TABLE: u8 = 1; @@ -42,7 +43,7 @@ const WASM_EXTERNAL_KIND_GLOBAL: u8 = 3; /// https://github.com/llvm-mirror/llvm/commit/0f32e1365, although support still /// needs to be added, tracked at https://bugs.llvm.org/show_bug.cgi?id=37168 pub fn rewrite_imports(path: &Path, import_map: &FxHashMap) { - if import_map.len() == 0 { + if import_map.is_empty() { return } @@ -121,13 +122,119 @@ pub fn rewrite_imports(path: &Path, import_map: &FxHashMap) { } } +/// Add or augment the existing `producers` section to encode information about +/// the Rust compiler used to produce the wasm file. +pub fn add_producer_section( + path: &Path, + rust_version: &str, + rustc_version: &str, +) { + struct Field<'a> { + name: &'a str, + values: Vec>, + } + + #[derive(Copy, Clone)] + struct FieldValue<'a> { + name: &'a str, + version: &'a str, + } + + let wasm = fs::read(path).expect("failed to read wasm output"); + let mut ret = WasmEncoder::new(); + ret.data.extend(&wasm[..8]); + + // skip the 8 byte wasm/version header + let rustc_value = FieldValue { + name: "rustc", + version: rustc_version, + }; + let rust_value = FieldValue { + name: "Rust", + version: rust_version, + }; + let mut fields = Vec::new(); + let mut wrote_rustc = false; + let mut wrote_rust = false; + + // Move all sections from the original wasm file to our output, skipping + // everything except the producers section + for (id, raw) in WasmSections(WasmDecoder::new(&wasm[8..])) { + if id != WASM_CUSTOM_SECTION_ID { + ret.byte(id); + ret.bytes(raw); + continue + } + let mut decoder = WasmDecoder::new(raw); + if decoder.str() != "producers" { + ret.byte(id); + ret.bytes(raw); + continue + } + + // Read off the producers section into our fields outside the loop, + // we'll re-encode the producers section when we're done (to handle an + // entirely missing producers section as well). + info!("rewriting existing producers section"); + + for _ in 0..decoder.u32() { + let name = decoder.str(); + let mut values = Vec::new(); + for _ in 0..decoder.u32() { + let name = decoder.str(); + let version = decoder.str(); + values.push(FieldValue { name, version }); + } + + if name == "language" { + values.push(rust_value); + wrote_rust = true; + } else if name == "processed-by" { + values.push(rustc_value); + wrote_rustc = true; + } + fields.push(Field { name, values }); + } + } + + if !wrote_rust { + fields.push(Field { + name: "language", + values: vec![rust_value], + }); + } + if !wrote_rustc { + fields.push(Field { + name: "processed-by", + values: vec![rustc_value], + }); + } + + // Append the producers section to the end of the wasm file. 
+ let mut section = WasmEncoder::new(); + section.str("producers"); + section.u32(fields.len() as u32); + for field in fields { + section.str(field.name); + section.u32(field.values.len() as u32); + for value in field.values { + section.str(value.name); + section.str(value.version); + } + } + ret.byte(WASM_CUSTOM_SECTION_ID); + ret.bytes(§ion.data); + + fs::write(path, &ret.data).expect("failed to write wasm output"); +} + struct WasmSections<'a>(WasmDecoder<'a>); impl<'a> Iterator for WasmSections<'a> { type Item = (u8, &'a [u8]); fn next(&mut self) -> Option<(u8, &'a [u8])> { - if self.0.data.len() == 0 { + if self.0.data.is_empty() { return None } diff --git a/src/librustc_codegen_llvm/back/write.rs b/src/librustc_codegen_llvm/back/write.rs index 81619c2197..2ddbd0c299 100644 --- a/src/librustc_codegen_llvm/back/write.rs +++ b/src/librustc_codegen_llvm/back/write.rs @@ -10,61 +10,38 @@ use attributes; use back::bytecode::{self, RLIB_BYTECODE_EXTENSION}; -use back::lto::{self, ModuleBuffer, ThinBuffer, SerializedModule}; -use back::link::{self, get_linker, remove}; -use back::command::Command; -use back::linker::LinkerInfo; -use back::symbol_export::ExportedSymbols; +use back::lto::ThinBuffer; +use rustc_codegen_ssa::back::write::{CodegenContext, ModuleConfig, run_assembler}; +use rustc_codegen_ssa::traits::*; use base; use consts; -use memmap; -use rustc_incremental::{copy_cgu_workproducts_to_incr_comp_cache_dir, - in_incr_comp_dir, in_incr_comp_dir_sess}; -use rustc::dep_graph::{WorkProduct, WorkProductId, WorkProductFileKind}; -use rustc::dep_graph::cgu_reuse_tracker::CguReuseTracker; -use rustc::middle::cstore::EncodedMetadata; -use rustc::session::config::{self, OutputFilenames, OutputType, Passes, Sanitizer, Lto}; +use rustc::session::config::{self, OutputType, Passes, Lto}; use rustc::session::Session; -use rustc::util::nodemap::FxHashMap; -use time_graph::{self, TimeGraph, Timeline}; +use time_graph::Timeline; use llvm::{self, DiagnosticInfo, PassManager, SMDiagnostic}; use llvm_util; -use {CodegenResults, ModuleCodegen, CompiledModule, ModuleKind, // ModuleLlvm, - CachedModuleCodegen}; -use CrateInfo; -use rustc::hir::def_id::{CrateNum, LOCAL_CRATE}; -use rustc::ty::TyCtxt; -use rustc::util::common::{time_ext, time_depth, set_time_depth, print_time_passes_entry}; -use rustc_fs_util::{path2cstr, link_or_copy}; +use ModuleLlvm; +use rustc_codegen_ssa::{ModuleCodegen, CompiledModule}; +use rustc::util::common::time_ext; +use rustc_fs_util::{path_to_c_string, link_or_copy}; use rustc_data_structures::small_c_str::SmallCStr; -use rustc_data_structures::svh::Svh; -use errors::{self, Handler, Level, DiagnosticBuilder, FatalError, DiagnosticId}; -use errors::emitter::{Emitter}; -use syntax::attr; -use syntax::ext::hygiene::Mark; -use syntax_pos::MultiSpan; -use syntax_pos::symbol::Symbol; +use errors::{self, Handler, FatalError}; use type_::Type; use context::{is_pie_binary, get_reloc_model}; -use common::{C_bytes_in_context, val_ty}; -use jobserver::{Client, Acquired}; +use common; +use LlvmCodegenBackend; use rustc_demangle; -use std::any::Any; use std::ffi::{CString, CStr}; use std::fs; use std::io::{self, Write}; -use std::mem; -use std::path::{Path, PathBuf}; +use std::path::Path; use std::str; use std::sync::Arc; -use std::sync::mpsc::{channel, Sender, Receiver}; use std::slice; -use std::time::Instant; -use std::thread; use libc::{c_uint, c_void, c_char, size_t}; -pub const RELOC_MODEL_ARGS : [(&'static str, llvm::RelocMode); 7] = [ +pub const RELOC_MODEL_ARGS : [(&str, 
llvm::RelocMode); 7] = [ ("pic", llvm::RelocMode::PIC), ("static", llvm::RelocMode::Static), ("default", llvm::RelocMode::Default), @@ -81,16 +58,14 @@ pub const CODE_GEN_MODEL_ARGS: &[(&str, llvm::CodeModel)] = &[ ("large", llvm::CodeModel::Large), ]; -pub const TLS_MODEL_ARGS : [(&'static str, llvm::ThreadLocalMode); 4] = [ +pub const TLS_MODEL_ARGS : [(&str, llvm::ThreadLocalMode); 4] = [ ("global-dynamic", llvm::ThreadLocalMode::GeneralDynamic), ("local-dynamic", llvm::ThreadLocalMode::LocalDynamic), ("initial-exec", llvm::ThreadLocalMode::InitialExec), ("local-exec", llvm::ThreadLocalMode::LocalExec), ]; -const PRE_THIN_LTO_BC_EXT: &str = "pre-thin-lto.bc"; - -pub fn llvm_err(handler: &errors::Handler, msg: String) -> FatalError { +pub fn llvm_err(handler: &errors::Handler, msg: &str) -> FatalError { match llvm::last_error() { Some(err) => handler.fatal(&format!("{}: {}", msg, err)), None => handler.fatal(&msg), @@ -105,19 +80,18 @@ pub fn write_output_file( output: &Path, file_type: llvm::FileType) -> Result<(), FatalError> { unsafe { - let output_c = path2cstr(output); - let result = llvm::LLVMRustWriteOutputFile( - target, pm, m, output_c.as_ptr(), file_type); + let output_c = path_to_c_string(output); + let result = llvm::LLVMRustWriteOutputFile(target, pm, m, output_c.as_ptr(), file_type); if result.into_result().is_err() { let msg = format!("could not write output to {}", output.display()); - Err(llvm_err(handler, msg)) + Err(llvm_err(handler, &msg)) } else { Ok(()) } } } -fn get_llvm_opt_level(optimize: config::OptLevel) -> llvm::CodeGenOptLevel { +pub(crate) fn get_llvm_opt_level(optimize: config::OptLevel) -> llvm::CodeGenOptLevel { match optimize { config::OptLevel::No => llvm::CodeGenOptLevel::None, config::OptLevel::Less => llvm::CodeGenOptLevel::Less, @@ -127,7 +101,7 @@ fn get_llvm_opt_level(optimize: config::OptLevel) -> llvm::CodeGenOptLevel { } } -fn get_llvm_opt_size(optimize: config::OptLevel) -> llvm::CodeGenOptSize { +pub(crate) fn get_llvm_opt_size(optimize: config::OptLevel) -> llvm::CodeGenOptSize { match optimize { config::OptLevel::Size => llvm::CodeGenOptSizeDefault, config::OptLevel::SizeMin => llvm::CodeGenOptSizeAggressive, @@ -140,7 +114,7 @@ pub fn create_target_machine( find_features: bool, ) -> &'static mut llvm::TargetMachine { target_machine_factory(sess, find_features)().unwrap_or_else(|err| { - llvm_err(sess.diagnostic(), err).raise() + llvm_err(sess.diagnostic(), &err).raise() }) } @@ -225,207 +199,31 @@ pub fn target_machine_factory(sess: &Session, find_features: bool) }) } -/// Module-specific configuration for `optimize_and_codegen`. -pub struct ModuleConfig { - /// Names of additional optimization passes to run. - passes: Vec, - /// Some(level) to optimize at a certain level, or None to run - /// absolutely no optimizations (used for the metadata module). - pub opt_level: Option, - - /// Some(level) to optimize binary size, or None to not affect program size. - opt_size: Option, - - pgo_gen: Option, - pgo_use: String, - - // Flags indicating which outputs to produce. - pub emit_pre_thin_lto_bc: bool, - emit_no_opt_bc: bool, - emit_bc: bool, - emit_bc_compressed: bool, - emit_lto_bc: bool, - emit_ir: bool, - emit_asm: bool, - emit_obj: bool, - // Miscellaneous flags. These are mostly copied from command-line - // options. 
- pub verify_llvm_ir: bool, - no_prepopulate_passes: bool, - no_builtins: bool, - time_passes: bool, - vectorize_loop: bool, - vectorize_slp: bool, - merge_functions: bool, - inline_threshold: Option, - // Instead of creating an object file by doing LLVM codegen, just - // make the object file bitcode. Provides easy compatibility with - // emscripten's ecc compiler, when used as the linker. - obj_is_bitcode: bool, - no_integrated_as: bool, - embed_bitcode: bool, - embed_bitcode_marker: bool, -} - -impl ModuleConfig { - fn new(passes: Vec) -> ModuleConfig { - ModuleConfig { - passes, - opt_level: None, - opt_size: None, - - pgo_gen: None, - pgo_use: String::new(), - - emit_no_opt_bc: false, - emit_pre_thin_lto_bc: false, - emit_bc: false, - emit_bc_compressed: false, - emit_lto_bc: false, - emit_ir: false, - emit_asm: false, - emit_obj: false, - obj_is_bitcode: false, - embed_bitcode: false, - embed_bitcode_marker: false, - no_integrated_as: false, - - verify_llvm_ir: false, - no_prepopulate_passes: false, - no_builtins: false, - time_passes: false, - vectorize_loop: false, - vectorize_slp: false, - merge_functions: false, - inline_threshold: None - } +pub(crate) fn save_temp_bitcode( + cgcx: &CodegenContext, + module: &ModuleCodegen, + name: &str +) { + if !cgcx.save_temps { + return } - - fn set_flags(&mut self, sess: &Session, no_builtins: bool) { - self.verify_llvm_ir = sess.verify_llvm_ir(); - self.no_prepopulate_passes = sess.opts.cg.no_prepopulate_passes; - self.no_builtins = no_builtins || sess.target.target.options.no_builtins; - self.time_passes = sess.time_passes(); - self.inline_threshold = sess.opts.cg.inline_threshold; - self.obj_is_bitcode = sess.target.target.options.obj_is_bitcode || - sess.opts.debugging_opts.cross_lang_lto.enabled(); - let embed_bitcode = sess.target.target.options.embed_bitcode || - sess.opts.debugging_opts.embed_bitcode; - if embed_bitcode { - match sess.opts.optimize { - config::OptLevel::No | - config::OptLevel::Less => { - self.embed_bitcode_marker = embed_bitcode; - } - _ => self.embed_bitcode = embed_bitcode, - } - } - - // Copy what clang does by turning on loop vectorization at O2 and - // slp vectorization at O3. Otherwise configure other optimization aspects - // of this pass manager builder. - // Turn off vectorization for emscripten, as it's not very well supported. 
- self.vectorize_loop = !sess.opts.cg.no_vectorize_loops && - (sess.opts.optimize == config::OptLevel::Default || - sess.opts.optimize == config::OptLevel::Aggressive) && - !sess.target.target.options.is_like_emscripten; - - self.vectorize_slp = !sess.opts.cg.no_vectorize_slp && - sess.opts.optimize == config::OptLevel::Aggressive && - !sess.target.target.options.is_like_emscripten; - - self.merge_functions = sess.opts.optimize == config::OptLevel::Default || - sess.opts.optimize == config::OptLevel::Aggressive; - } -} - -/// Assembler name and command used by codegen when no_integrated_as is enabled -struct AssemblerCommand { - name: PathBuf, - cmd: Command, -} - -/// Additional resources used by optimize_and_codegen (not module specific) -#[derive(Clone)] -pub struct CodegenContext { - // Resources needed when running LTO - pub time_passes: bool, - pub lto: Lto, - pub no_landing_pads: bool, - pub save_temps: bool, - pub fewer_names: bool, - pub exported_symbols: Option>, - pub opts: Arc, - pub crate_types: Vec, - pub each_linked_rlib_for_lto: Vec<(CrateNum, PathBuf)>, - output_filenames: Arc, - regular_module_config: Arc, - metadata_module_config: Arc, - allocator_module_config: Arc, - pub tm_factory: Arc Result<&'static mut llvm::TargetMachine, String> + Send + Sync>, - pub msvc_imps_needed: bool, - pub target_pointer_width: String, - debuginfo: config::DebugInfo, - - // Number of cgus excluding the allocator/metadata modules - pub total_cgus: usize, - // Handler to use for diagnostics produced during codegen. - pub diag_emitter: SharedEmitter, - // LLVM passes added by plugins. - pub plugin_passes: Vec, - // LLVM optimizations for which we want to print remarks. - pub remark: Passes, - // Worker thread number - pub worker: usize, - // The incremental compilation session directory, or None if we are not - // compiling incrementally - pub incr_comp_session_dir: Option, - // Used to update CGU re-use information during the thinlto phase. - pub cgu_reuse_tracker: CguReuseTracker, - // Channel back to the main control thread to send messages to - coordinator_send: Sender>, - // A reference to the TimeGraph so we can register timings. None means that - // measuring is disabled. 
- time_graph: Option, - // The assembler command if no_integrated_as option is enabled, None otherwise - assembler_cmd: Option>, -} - -impl CodegenContext { - pub fn create_diag_handler(&self) -> Handler { - Handler::with_emitter(true, false, Box::new(self.diag_emitter.clone())) - } - - pub(crate) fn config(&self, kind: ModuleKind) -> &ModuleConfig { - match kind { - ModuleKind::Regular => &self.regular_module_config, - ModuleKind::Metadata => &self.metadata_module_config, - ModuleKind::Allocator => &self.allocator_module_config, - } - } - - pub(crate) fn save_temp_bitcode(&self, module: &ModuleCodegen, name: &str) { - if !self.save_temps { - return - } - unsafe { - let ext = format!("{}.bc", name); - let cgu = Some(&module.name[..]); - let path = self.output_filenames.temp_path_ext(&ext, cgu); - let cstr = path2cstr(&path); - let llmod = module.module_llvm.llmod(); - llvm::LLVMWriteBitcodeToFile(llmod, cstr.as_ptr()); - } + unsafe { + let ext = format!("{}.bc", name); + let cgu = Some(&module.name[..]); + let path = cgcx.output_filenames.temp_path_ext(&ext, cgu); + let cstr = path_to_c_string(&path); + let llmod = module.module_llvm.llmod(); + llvm::LLVMWriteBitcodeToFile(llmod, cstr.as_ptr()); } } pub struct DiagnosticHandlers<'a> { - data: *mut (&'a CodegenContext, &'a Handler), + data: *mut (&'a CodegenContext, &'a Handler), llcx: &'a llvm::Context, } impl<'a> DiagnosticHandlers<'a> { - pub fn new(cgcx: &'a CodegenContext, + pub fn new(cgcx: &'a CodegenContext, handler: &'a Handler, llcx: &'a llvm::Context) -> Self { let data = Box::into_raw(Box::new((cgcx, handler))); @@ -448,10 +246,10 @@ impl<'a> Drop for DiagnosticHandlers<'a> { } } -unsafe extern "C" fn report_inline_asm<'a, 'b>(cgcx: &'a CodegenContext, +unsafe extern "C" fn report_inline_asm<'a, 'b>(cgcx: &'a CodegenContext, msg: &'b str, cookie: c_uint) { - cgcx.diag_emitter.inline_asm_error(cookie as u32, msg.to_string()); + cgcx.diag_emitter.inline_asm_error(cookie as u32, msg.to_owned()); } unsafe extern "C" fn inline_asm_handler(diag: &SMDiagnostic, @@ -460,7 +258,7 @@ unsafe extern "C" fn inline_asm_handler(diag: &SMDiagnostic, if user.is_null() { return } - let (cgcx, _) = *(user as *const (&CodegenContext, &Handler)); + let (cgcx, _) = *(user as *const (&CodegenContext, &Handler)); let msg = llvm::build_string(|s| llvm::LLVMRustWriteSMDiagnosticToString(diag, s)) .expect("non-UTF8 SMDiagnostic"); @@ -472,7 +270,7 @@ unsafe extern "C" fn diagnostic_handler(info: &DiagnosticInfo, user: *mut c_void if user.is_null() { return } - let (cgcx, diag_handler) = *(user as *const (&CodegenContext, &Handler)); + let (cgcx, diag_handler) = *(user as *const (&CodegenContext, &Handler)); match llvm::diagnostic::Diagnostic::unpack(info) { llvm::diagnostic::InlineAsm(inline) => { @@ -509,9 +307,9 @@ unsafe extern "C" fn diagnostic_handler(info: &DiagnosticInfo, user: *mut c_void } // Unsafe due to LLVM calls. 
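// The `DiagnosticHandlers` plumbing above hands LLVM a type-erased
// `*mut c_void` pointing at a `(&CodegenContext, &Handler)` pair and then
// recovers that pair inside the `extern "C"` callbacks. A minimal,
// self-contained sketch of the same pattern follows; the names `Ctx`,
// `callback`, and `sketch` are illustrative assumptions, not part of this
// code base.
use std::os::raw::c_void;

struct Ctx { name: &'static str }

unsafe extern "C" fn callback(user: *mut c_void) {
    if user.is_null() { return }
    // Recover the borrowed pair that was erased to `*mut c_void` below.
    let (ctx, count) = *(user as *const (&Ctx, &u32));
    println!("callback for {}: {} diagnostics so far", ctx.name, count);
}

fn sketch() {
    let ctx = Ctx { name: "example" };
    let count = 0u32;
    // Box the pair and erase its type for the duration of the callback...
    let data = Box::into_raw(Box::new((&ctx, &count)));
    unsafe {
        callback(data as *mut c_void);
        // ...then reclaim the box so nothing leaks, mirroring what
        // `Drop for DiagnosticHandlers` does above.
        drop(Box::from_raw(data));
    }
}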
-unsafe fn optimize(cgcx: &CodegenContext, +pub(crate) unsafe fn optimize(cgcx: &CodegenContext, diag_handler: &Handler, - module: &ModuleCodegen, + module: &ModuleCodegen, config: &ModuleConfig, timeline: &mut Timeline) -> Result<(), FatalError> @@ -526,7 +324,7 @@ unsafe fn optimize(cgcx: &CodegenContext, if config.emit_no_opt_bc { let out = cgcx.output_filenames.temp_path_ext("no-opt.bc", module_name); - let out = path2cstr(&out); + let out = path_to_c_string(&out); llvm::LLVMWriteBitcodeToFile(llmod, out.as_ptr()); } @@ -564,30 +362,30 @@ unsafe fn optimize(cgcx: &CodegenContext, // Some options cause LLVM bitcode to be emitted, which uses ThinLTOBuffers, so we need // to make sure we run LLVM's NameAnonGlobals pass when emitting bitcode; otherwise // we'll get errors in LLVM. - let using_thin_buffers = llvm::LLVMRustThinLTOAvailable() && (config.emit_bc - || config.obj_is_bitcode || config.emit_bc_compressed || config.embed_bitcode); + let using_thin_buffers = config.bitcode_needed(); let mut have_name_anon_globals_pass = false; if !config.no_prepopulate_passes { llvm::LLVMRustAddAnalysisPasses(tm, fpm, llmod); llvm::LLVMRustAddAnalysisPasses(tm, mpm, llmod); - let opt_level = config.opt_level.unwrap_or(llvm::CodeGenOptLevel::None); + let opt_level = config.opt_level.map(get_llvm_opt_level) + .unwrap_or(llvm::CodeGenOptLevel::None); let prepare_for_thin_lto = cgcx.lto == Lto::Thin || cgcx.lto == Lto::ThinLocal || (cgcx.lto != Lto::Fat && cgcx.opts.debugging_opts.cross_lang_lto.enabled()); + with_llvm_pmb(llmod, &config, opt_level, prepare_for_thin_lto, &mut |b| { + llvm::LLVMPassManagerBuilderPopulateFunctionPassManager(b, fpm); + llvm::LLVMPassManagerBuilderPopulateModulePassManager(b, mpm); + }); + have_name_anon_globals_pass = have_name_anon_globals_pass || prepare_for_thin_lto; if using_thin_buffers && !prepare_for_thin_lto { assert!(addpass("name-anon-globals")); have_name_anon_globals_pass = true; } - with_llvm_pmb(llmod, &config, opt_level, prepare_for_thin_lto, &mut |b| { - llvm::LLVMPassManagerBuilderPopulateFunctionPassManager(b, fpm); - llvm::LLVMPassManagerBuilderPopulateModulePassManager(b, mpm); - }) } for pass in &config.passes { if !addpass(pass) { - diag_handler.warn(&format!("unknown pass `{}`, ignoring", - pass)); + diag_handler.warn(&format!("unknown pass `{}`, ignoring", pass)); } if pass == "name-anon-globals" { have_name_anon_globals_pass = true; @@ -597,8 +395,8 @@ unsafe fn optimize(cgcx: &CodegenContext, for pass in &cgcx.plugin_passes { if !addpass(pass) { diag_handler.err(&format!("a plugin asked for LLVM pass \ - `{}` but LLVM does not \ - recognize it", pass)); + `{}` but LLVM does not \ + recognize it", pass)); } if pass == "name-anon-globals" { have_name_anon_globals_pass = true; @@ -609,12 +407,12 @@ unsafe fn optimize(cgcx: &CodegenContext, // As described above, this will probably cause an error in LLVM if config.no_prepopulate_passes { diag_handler.err("The current compilation is going to use thin LTO buffers \ - without running LLVM's NameAnonGlobals pass. \ - This will likely cause errors in LLVM. Consider adding \ - -C passes=name-anon-globals to the compiler command line."); + without running LLVM's NameAnonGlobals pass. \ + This will likely cause errors in LLVM. Consider adding \ + -C passes=name-anon-globals to the compiler command line."); } else { bug!("We are using thin LTO buffers without running the NameAnonGlobals pass. 
\ - This will likely cause errors in LLVM and should never happen."); + This will likely cause errors in LLVM and should never happen."); } } } @@ -643,37 +441,9 @@ unsafe fn optimize(cgcx: &CodegenContext, Ok(()) } -fn generate_lto_work(cgcx: &CodegenContext, - modules: Vec, - import_only_modules: Vec<(SerializedModule, WorkProduct)>) - -> Vec<(WorkItem, u64)> -{ - let mut timeline = cgcx.time_graph.as_ref().map(|tg| { - tg.start(CODEGEN_WORKER_TIMELINE, - CODEGEN_WORK_PACKAGE_KIND, - "generate lto") - }).unwrap_or(Timeline::noop()); - let (lto_modules, copy_jobs) = lto::run(cgcx, modules, import_only_modules, &mut timeline) - .unwrap_or_else(|e| e.raise()); - - let lto_modules = lto_modules.into_iter().map(|module| { - let cost = module.cost(); - (WorkItem::LTO(module), cost) - }); - - let copy_jobs = copy_jobs.into_iter().map(|wp| { - (WorkItem::CopyPostLtoArtifacts(CachedModuleCodegen { - name: wp.cgu_name.clone(), - source: wp, - }), 0) - }); - - lto_modules.chain(copy_jobs).collect() -} - -unsafe fn codegen(cgcx: &CodegenContext, +pub(crate) unsafe fn codegen(cgcx: &CodegenContext, diag_handler: &Handler, - module: ModuleCodegen, + module: ModuleCodegen, config: &ModuleConfig, timeline: &mut Timeline) -> Result @@ -700,9 +470,9 @@ unsafe fn codegen(cgcx: &CodegenContext, // escape the closure itself, and the manager should only be // used once. unsafe fn with_codegen<'ll, F, R>(tm: &'ll llvm::TargetMachine, - llmod: &'ll llvm::Module, - no_builtins: bool, - f: F) -> R + llmod: &'ll llvm::Module, + no_builtins: bool, + f: F) -> R where F: FnOnce(&'ll mut PassManager<'ll>) -> R, { let cpm = llvm::LLVMCreatePassManager(); @@ -729,15 +499,8 @@ unsafe fn codegen(cgcx: &CodegenContext, if write_bc || config.emit_bc_compressed || config.embed_bitcode { - let thin; - let old; - let data = if llvm::LLVMRustThinLTOAvailable() { - thin = ThinBuffer::new(llmod); - thin.data() - } else { - old = ModuleBuffer::new(llmod); - old.data() - }; + let thin = ThinBuffer::new(llmod); + let data = thin.data(); timeline.record("make-bc"); if write_bc { @@ -768,7 +531,7 @@ unsafe fn codegen(cgcx: &CodegenContext, || -> Result<(), FatalError> { if config.emit_ir { let out = cgcx.output_filenames.temp_path(OutputType::LlvmAssembly, module_name); - let out = path2cstr(&out); + let out = path_to_c_string(&out); extern "C" fn demangle_callback(input_ptr: *const c_char, input_len: size_t, @@ -821,7 +584,7 @@ unsafe fn codegen(cgcx: &CodegenContext, }; with_codegen(tm, llmod, config.no_builtins, |cpm| { write_output_file(diag_handler, tm, cpm, llmod, &path, - llvm::FileType::AssemblyFile) + llvm::FileType::AssemblyFile) })?; timeline.record("asm"); } @@ -829,7 +592,7 @@ unsafe fn codegen(cgcx: &CodegenContext, if write_obj { with_codegen(tm, llmod, config.no_builtins, |cpm| { write_output_file(diag_handler, tm, cpm, llmod, &obj_out, - llvm::FileType::ObjectFile) + llvm::FileType::ObjectFile) })?; timeline.record("obj"); } else if asm_to_obj { @@ -885,14 +648,14 @@ unsafe fn codegen(cgcx: &CodegenContext, /// /// Basically all of this is us attempting to follow in the footsteps of clang /// on iOS. See #35968 for lots more info. 
-unsafe fn embed_bitcode(cgcx: &CodegenContext, +unsafe fn embed_bitcode(cgcx: &CodegenContext, llcx: &llvm::Context, llmod: &llvm::Module, bitcode: Option<&[u8]>) { - let llconst = C_bytes_in_context(llcx, bitcode.unwrap_or(&[])); + let llconst = common::bytes_in_context(llcx, bitcode.unwrap_or(&[])); let llglobal = llvm::LLVMAddGlobal( llmod, - val_ty(llconst), + common::val_ty(llconst), "rustc.embedded.module\0".as_ptr() as *const _, ); llvm::LLVMSetInitializer(llglobal, llconst); @@ -909,10 +672,10 @@ unsafe fn embed_bitcode(cgcx: &CodegenContext, llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage); llvm::LLVMSetGlobalConstant(llglobal, llvm::True); - let llconst = C_bytes_in_context(llcx, &[]); + let llconst = common::bytes_in_context(llcx, &[]); let llglobal = llvm::LLVMAddGlobal( llmod, - val_ty(llconst), + common::val_ty(llconst), "rustc.embedded.cmdline\0".as_ptr() as *const _, ); llvm::LLVMSetInitializer(llglobal, llconst); @@ -925,1260 +688,6 @@ unsafe fn embed_bitcode(cgcx: &CodegenContext, llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage); } -pub(crate) struct CompiledModules { - pub modules: Vec, - pub metadata_module: CompiledModule, - pub allocator_module: Option, -} - -fn need_crate_bitcode_for_rlib(sess: &Session) -> bool { - sess.crate_types.borrow().contains(&config::CrateType::Rlib) && - sess.opts.output_types.contains_key(&OutputType::Exe) -} - -fn need_pre_thin_lto_bitcode_for_incr_comp(sess: &Session) -> bool { - if sess.opts.incremental.is_none() { - return false - } - - match sess.lto() { - Lto::Fat | - Lto::No => false, - Lto::Thin | - Lto::ThinLocal => true, - } -} - -pub fn start_async_codegen(tcx: TyCtxt, - time_graph: Option, - metadata: EncodedMetadata, - coordinator_receive: Receiver>, - total_cgus: usize) - -> OngoingCodegen { - let sess = tcx.sess; - let crate_name = tcx.crate_name(LOCAL_CRATE); - let crate_hash = tcx.crate_hash(LOCAL_CRATE); - let no_builtins = attr::contains_name(&tcx.hir.krate().attrs, "no_builtins"); - let subsystem = attr::first_attr_value_str_by_name(&tcx.hir.krate().attrs, - "windows_subsystem"); - let windows_subsystem = subsystem.map(|subsystem| { - if subsystem != "windows" && subsystem != "console" { - tcx.sess.fatal(&format!("invalid windows subsystem `{}`, only \ - `windows` and `console` are allowed", - subsystem)); - } - subsystem.to_string() - }); - - let linker_info = LinkerInfo::new(tcx); - let crate_info = CrateInfo::new(tcx); - - // Figure out what we actually need to build. 
- let mut modules_config = ModuleConfig::new(sess.opts.cg.passes.clone()); - let mut metadata_config = ModuleConfig::new(vec![]); - let mut allocator_config = ModuleConfig::new(vec![]); - - if let Some(ref sanitizer) = sess.opts.debugging_opts.sanitizer { - match *sanitizer { - Sanitizer::Address => { - modules_config.passes.push("asan".to_owned()); - modules_config.passes.push("asan-module".to_owned()); - } - Sanitizer::Memory => { - modules_config.passes.push("msan".to_owned()) - } - Sanitizer::Thread => { - modules_config.passes.push("tsan".to_owned()) - } - _ => {} - } - } - - if sess.opts.debugging_opts.profile { - modules_config.passes.push("insert-gcov-profiling".to_owned()) - } - - modules_config.pgo_gen = sess.opts.debugging_opts.pgo_gen.clone(); - modules_config.pgo_use = sess.opts.debugging_opts.pgo_use.clone(); - - modules_config.opt_level = Some(get_llvm_opt_level(sess.opts.optimize)); - modules_config.opt_size = Some(get_llvm_opt_size(sess.opts.optimize)); - - // Save all versions of the bytecode if we're saving our temporaries. - if sess.opts.cg.save_temps { - modules_config.emit_no_opt_bc = true; - modules_config.emit_pre_thin_lto_bc = true; - modules_config.emit_bc = true; - modules_config.emit_lto_bc = true; - metadata_config.emit_bc = true; - allocator_config.emit_bc = true; - } - - // Emit compressed bitcode files for the crate if we're emitting an rlib. - // Whenever an rlib is created, the bitcode is inserted into the archive in - // order to allow LTO against it. - if need_crate_bitcode_for_rlib(sess) { - modules_config.emit_bc_compressed = true; - allocator_config.emit_bc_compressed = true; - } - - modules_config.emit_pre_thin_lto_bc = - need_pre_thin_lto_bitcode_for_incr_comp(sess); - - modules_config.no_integrated_as = tcx.sess.opts.cg.no_integrated_as || - tcx.sess.target.target.options.no_integrated_as; - - for output_type in sess.opts.output_types.keys() { - match *output_type { - OutputType::Bitcode => { modules_config.emit_bc = true; } - OutputType::LlvmAssembly => { modules_config.emit_ir = true; } - OutputType::Assembly => { - modules_config.emit_asm = true; - // If we're not using the LLVM assembler, this function - // could be invoked specially with output_type_assembly, so - // in this case we still want the metadata object file. - if !sess.opts.output_types.contains_key(&OutputType::Assembly) { - metadata_config.emit_obj = true; - allocator_config.emit_obj = true; - } - } - OutputType::Object => { modules_config.emit_obj = true; } - OutputType::Metadata => { metadata_config.emit_obj = true; } - OutputType::Exe => { - modules_config.emit_obj = true; - metadata_config.emit_obj = true; - allocator_config.emit_obj = true; - }, - OutputType::Mir => {} - OutputType::DepInfo => {} - } - } - - modules_config.set_flags(sess, no_builtins); - metadata_config.set_flags(sess, no_builtins); - allocator_config.set_flags(sess, no_builtins); - - // Exclude metadata and allocator modules from time_passes output, since - // they throw off the "LLVM passes" measurement. 
- metadata_config.time_passes = false; - allocator_config.time_passes = false; - - let (shared_emitter, shared_emitter_main) = SharedEmitter::new(); - let (codegen_worker_send, codegen_worker_receive) = channel(); - - let coordinator_thread = start_executing_work(tcx, - &crate_info, - shared_emitter, - codegen_worker_send, - coordinator_receive, - total_cgus, - sess.jobserver.clone(), - time_graph.clone(), - Arc::new(modules_config), - Arc::new(metadata_config), - Arc::new(allocator_config)); - - OngoingCodegen { - crate_name, - crate_hash, - metadata, - windows_subsystem, - linker_info, - crate_info, - - time_graph, - coordinator_send: tcx.tx_to_llvm_workers.lock().clone(), - codegen_worker_receive, - shared_emitter_main, - future: coordinator_thread, - output_filenames: tcx.output_filenames(LOCAL_CRATE), - } -} - -fn copy_all_cgu_workproducts_to_incr_comp_cache_dir( - sess: &Session, - compiled_modules: &CompiledModules, -) -> FxHashMap { - let mut work_products = FxHashMap::default(); - - if sess.opts.incremental.is_none() { - return work_products; - } - - for module in compiled_modules.modules.iter().filter(|m| m.kind == ModuleKind::Regular) { - let mut files = vec![]; - - if let Some(ref path) = module.object { - files.push((WorkProductFileKind::Object, path.clone())); - } - if let Some(ref path) = module.bytecode { - files.push((WorkProductFileKind::Bytecode, path.clone())); - } - if let Some(ref path) = module.bytecode_compressed { - files.push((WorkProductFileKind::BytecodeCompressed, path.clone())); - } - - if let Some((id, product)) = - copy_cgu_workproducts_to_incr_comp_cache_dir(sess, &module.name, &files) { - work_products.insert(id, product); - } - } - - work_products -} - -fn produce_final_output_artifacts(sess: &Session, - compiled_modules: &CompiledModules, - crate_output: &OutputFilenames) { - let mut user_wants_bitcode = false; - let mut user_wants_objects = false; - - // Produce final compile outputs. - let copy_gracefully = |from: &Path, to: &Path| { - if let Err(e) = fs::copy(from, to) { - sess.err(&format!("could not copy {:?} to {:?}: {}", from, to, e)); - } - }; - - let copy_if_one_unit = |output_type: OutputType, - keep_numbered: bool| { - if compiled_modules.modules.len() == 1 { - // 1) Only one codegen unit. In this case it's no difficulty - // to copy `foo.0.x` to `foo.x`. - let module_name = Some(&compiled_modules.modules[0].name[..]); - let path = crate_output.temp_path(output_type, module_name); - copy_gracefully(&path, - &crate_output.path(output_type)); - if !sess.opts.cg.save_temps && !keep_numbered { - // The user just wants `foo.x`, not `foo.#module-name#.x`. - remove(sess, &path); - } - } else { - let ext = crate_output.temp_path(output_type, None) - .extension() - .unwrap() - .to_str() - .unwrap() - .to_owned(); - - if crate_output.outputs.contains_key(&output_type) { - // 2) Multiple codegen units, with `--emit foo=some_name`. We have - // no good solution for this case, so warn the user. - sess.warn(&format!("ignoring emit path because multiple .{} files \ - were produced", ext)); - } else if crate_output.single_output_file.is_some() { - // 3) Multiple codegen units, with `-o some_name`. We have - // no good solution for this case, so warn the user. - sess.warn(&format!("ignoring -o because multiple .{} files \ - were produced", ext)); - } else { - // 4) Multiple codegen units, but no explicit name. We - // just leave the `foo.0.x` files in place. - // (We don't have to do any work in this case.) 
- } - } - }; - - // Flag to indicate whether the user explicitly requested bitcode. - // Otherwise, we produced it only as a temporary output, and will need - // to get rid of it. - for output_type in crate_output.outputs.keys() { - match *output_type { - OutputType::Bitcode => { - user_wants_bitcode = true; - // Copy to .bc, but always keep the .0.bc. There is a later - // check to figure out if we should delete .0.bc files, or keep - // them for making an rlib. - copy_if_one_unit(OutputType::Bitcode, true); - } - OutputType::LlvmAssembly => { - copy_if_one_unit(OutputType::LlvmAssembly, false); - } - OutputType::Assembly => { - copy_if_one_unit(OutputType::Assembly, false); - } - OutputType::Object => { - user_wants_objects = true; - copy_if_one_unit(OutputType::Object, true); - } - OutputType::Mir | - OutputType::Metadata | - OutputType::Exe | - OutputType::DepInfo => {} - } - } - - // Clean up unwanted temporary files. - - // We create the following files by default: - // - #crate#.#module-name#.bc - // - #crate#.#module-name#.o - // - #crate#.crate.metadata.bc - // - #crate#.crate.metadata.o - // - #crate#.o (linked from crate.##.o) - // - #crate#.bc (copied from crate.##.bc) - // We may create additional files if requested by the user (through - // `-C save-temps` or `--emit=` flags). - - if !sess.opts.cg.save_temps { - // Remove the temporary .#module-name#.o objects. If the user didn't - // explicitly request bitcode (with --emit=bc), and the bitcode is not - // needed for building an rlib, then we must remove .#module-name#.bc as - // well. - - // Specific rules for keeping .#module-name#.bc: - // - If the user requested bitcode (`user_wants_bitcode`), and - // codegen_units > 1, then keep it. - // - If the user requested bitcode but codegen_units == 1, then we - // can toss .#module-name#.bc because we copied it to .bc earlier. - // - If we're not building an rlib and the user didn't request - // bitcode, then delete .#module-name#.bc. - // If you change how this works, also update back::link::link_rlib, - // where .#module-name#.bc files are (maybe) deleted after making an - // rlib. - let needs_crate_object = crate_output.outputs.contains_key(&OutputType::Exe); - - let keep_numbered_bitcode = user_wants_bitcode && sess.codegen_units() > 1; - - let keep_numbered_objects = needs_crate_object || - (user_wants_objects && sess.codegen_units() > 1); - - for module in compiled_modules.modules.iter() { - if let Some(ref path) = module.object { - if !keep_numbered_objects { - remove(sess, path); - } - } - - if let Some(ref path) = module.bytecode { - if !keep_numbered_bitcode { - remove(sess, path); - } - } - } - - if !user_wants_bitcode { - if let Some(ref path) = compiled_modules.metadata_module.bytecode { - remove(sess, &path); - } - - if let Some(ref allocator_module) = compiled_modules.allocator_module { - if let Some(ref path) = allocator_module.bytecode { - remove(sess, path); - } - } - } - } - - // We leave the following files around by default: - // - #crate#.o - // - #crate#.crate.metadata.o - // - #crate#.bc - // These are used in linking steps and will be cleaned up afterward. -} - -pub(crate) fn dump_incremental_data(_codegen_results: &CodegenResults) { - // FIXME(mw): This does not work at the moment because the situation has - // become more complicated due to incremental LTO. Now a CGU - // can have more than two caching states. 
- // println!("[incremental] Re-using {} out of {} modules", - // codegen_results.modules.iter().filter(|m| m.pre_existing).count(), - // codegen_results.modules.len()); -} - -enum WorkItem { - /// Optimize a newly codegened, totally unoptimized module. - Optimize(ModuleCodegen), - /// Copy the post-LTO artifacts from the incremental cache to the output - /// directory. - CopyPostLtoArtifacts(CachedModuleCodegen), - /// Perform (Thin)LTO on the given module. - LTO(lto::LtoModuleCodegen), -} - -impl WorkItem { - fn module_kind(&self) -> ModuleKind { - match *self { - WorkItem::Optimize(ref m) => m.kind, - WorkItem::CopyPostLtoArtifacts(_) | - WorkItem::LTO(_) => ModuleKind::Regular, - } - } - - fn name(&self) -> String { - match *self { - WorkItem::Optimize(ref m) => format!("optimize: {}", m.name), - WorkItem::CopyPostLtoArtifacts(ref m) => format!("copy post LTO artifacts: {}", m.name), - WorkItem::LTO(ref m) => format!("lto: {}", m.name()), - } - } -} - -enum WorkItemResult { - Compiled(CompiledModule), - NeedsLTO(ModuleCodegen), -} - -fn execute_work_item(cgcx: &CodegenContext, - work_item: WorkItem, - timeline: &mut Timeline) - -> Result -{ - let module_config = cgcx.config(work_item.module_kind()); - - match work_item { - WorkItem::Optimize(module) => { - execute_optimize_work_item(cgcx, module, module_config, timeline) - } - WorkItem::CopyPostLtoArtifacts(module) => { - execute_copy_from_cache_work_item(cgcx, module, module_config, timeline) - } - WorkItem::LTO(module) => { - execute_lto_work_item(cgcx, module, module_config, timeline) - } - } -} - -fn execute_optimize_work_item(cgcx: &CodegenContext, - module: ModuleCodegen, - module_config: &ModuleConfig, - timeline: &mut Timeline) - -> Result -{ - let diag_handler = cgcx.create_diag_handler(); - - unsafe { - optimize(cgcx, &diag_handler, &module, module_config, timeline)?; - } - - let linker_does_lto = cgcx.opts.debugging_opts.cross_lang_lto.enabled(); - - // After we've done the initial round of optimizations we need to - // decide whether to synchronously codegen this module or ship it - // back to the coordinator thread for further LTO processing (which - // has to wait for all the initial modules to be optimized). - // - // Here we dispatch based on the `cgcx.lto` and kind of module we're - // codegenning... - let needs_lto = match cgcx.lto { - Lto::No => false, - - // If the linker does LTO, we don't have to do it. Note that we - // keep doing full LTO, if it is requested, as not to break the - // assumption that the output will be a single module. - Lto::Thin | Lto::ThinLocal if linker_does_lto => false, - - // Here we've got a full crate graph LTO requested. We ignore - // this, however, if the crate type is only an rlib as there's - // no full crate graph to process, that'll happen later. - // - // This use case currently comes up primarily for targets that - // require LTO so the request for LTO is always unconditionally - // passed down to the backend, but we don't actually want to do - // anything about it yet until we've got a final product. - Lto::Fat | Lto::Thin => { - cgcx.crate_types.len() != 1 || - cgcx.crate_types[0] != config::CrateType::Rlib - } - - // When we're automatically doing ThinLTO for multi-codegen-unit - // builds we don't actually want to LTO the allocator modules if - // it shows up. This is due to various linker shenanigans that - // we'll encounter later. - // - // Additionally here's where we also factor in the current LLVM - // version. If it doesn't support ThinLTO we skip this. 
- Lto::ThinLocal => { - module.kind != ModuleKind::Allocator && - unsafe { llvm::LLVMRustThinLTOAvailable() } - } - }; - - // Metadata modules never participate in LTO regardless of the lto - // settings. - let needs_lto = needs_lto && module.kind != ModuleKind::Metadata; - - if needs_lto { - Ok(WorkItemResult::NeedsLTO(module)) - } else { - let module = unsafe { - codegen(cgcx, &diag_handler, module, module_config, timeline)? - }; - Ok(WorkItemResult::Compiled(module)) - } -} - -fn execute_copy_from_cache_work_item(cgcx: &CodegenContext, - module: CachedModuleCodegen, - module_config: &ModuleConfig, - _: &mut Timeline) - -> Result -{ - let incr_comp_session_dir = cgcx.incr_comp_session_dir - .as_ref() - .unwrap(); - let mut object = None; - let mut bytecode = None; - let mut bytecode_compressed = None; - for (kind, saved_file) in &module.source.saved_files { - let obj_out = match kind { - WorkProductFileKind::Object => { - let path = cgcx.output_filenames.temp_path(OutputType::Object, - Some(&module.name)); - object = Some(path.clone()); - path - } - WorkProductFileKind::Bytecode => { - let path = cgcx.output_filenames.temp_path(OutputType::Bitcode, - Some(&module.name)); - bytecode = Some(path.clone()); - path - } - WorkProductFileKind::BytecodeCompressed => { - let path = cgcx.output_filenames.temp_path(OutputType::Bitcode, - Some(&module.name)) - .with_extension(RLIB_BYTECODE_EXTENSION); - bytecode_compressed = Some(path.clone()); - path - } - }; - let source_file = in_incr_comp_dir(&incr_comp_session_dir, - &saved_file); - debug!("copying pre-existing module `{}` from {:?} to {}", - module.name, - source_file, - obj_out.display()); - match link_or_copy(&source_file, &obj_out) { - Ok(_) => { } - Err(err) => { - let diag_handler = cgcx.create_diag_handler(); - diag_handler.err(&format!("unable to copy {} to {}: {}", - source_file.display(), - obj_out.display(), - err)); - } - } - } - - assert_eq!(object.is_some(), module_config.emit_obj); - assert_eq!(bytecode.is_some(), module_config.emit_bc); - assert_eq!(bytecode_compressed.is_some(), module_config.emit_bc_compressed); - - Ok(WorkItemResult::Compiled(CompiledModule { - name: module.name, - kind: ModuleKind::Regular, - object, - bytecode, - bytecode_compressed, - })) -} - -fn execute_lto_work_item(cgcx: &CodegenContext, - mut module: lto::LtoModuleCodegen, - module_config: &ModuleConfig, - timeline: &mut Timeline) - -> Result -{ - let diag_handler = cgcx.create_diag_handler(); - - unsafe { - let module = module.optimize(cgcx, timeline)?; - let module = codegen(cgcx, &diag_handler, module, module_config, timeline)?; - Ok(WorkItemResult::Compiled(module)) - } -} - -enum Message { - Token(io::Result), - NeedsLTO { - result: ModuleCodegen, - worker_id: usize, - }, - Done { - result: Result, - worker_id: usize, - }, - CodegenDone { - llvm_work_item: WorkItem, - cost: u64, - }, - AddImportOnlyModule { - module_data: SerializedModule, - work_product: WorkProduct, - }, - CodegenComplete, - CodegenItem, -} - -struct Diagnostic { - msg: String, - code: Option, - lvl: Level, -} - -#[derive(PartialEq, Clone, Copy, Debug)] -enum MainThreadWorkerState { - Idle, - Codegenning, - LLVMing, -} - -fn start_executing_work(tcx: TyCtxt, - crate_info: &CrateInfo, - shared_emitter: SharedEmitter, - codegen_worker_send: Sender, - coordinator_receive: Receiver>, - total_cgus: usize, - jobserver: Client, - time_graph: Option, - modules_config: Arc, - metadata_config: Arc, - allocator_config: Arc) - -> thread::JoinHandle> { - let coordinator_send = 
tcx.tx_to_llvm_workers.lock().clone(); - let sess = tcx.sess; - - // Compute the set of symbols we need to retain when doing LTO (if we need to) - let exported_symbols = { - let mut exported_symbols = FxHashMap::default(); - - let copy_symbols = |cnum| { - let symbols = tcx.exported_symbols(cnum) - .iter() - .map(|&(s, lvl)| (s.symbol_name(tcx).to_string(), lvl)) - .collect(); - Arc::new(symbols) - }; - - match sess.lto() { - Lto::No => None, - Lto::ThinLocal => { - exported_symbols.insert(LOCAL_CRATE, copy_symbols(LOCAL_CRATE)); - Some(Arc::new(exported_symbols)) - } - Lto::Fat | Lto::Thin => { - exported_symbols.insert(LOCAL_CRATE, copy_symbols(LOCAL_CRATE)); - for &cnum in tcx.crates().iter() { - exported_symbols.insert(cnum, copy_symbols(cnum)); - } - Some(Arc::new(exported_symbols)) - } - } - }; - - // First up, convert our jobserver into a helper thread so we can use normal - // mpsc channels to manage our messages and such. - // After we've requested tokens then we'll, when we can, - // get tokens on `coordinator_receive` which will - // get managed in the main loop below. - let coordinator_send2 = coordinator_send.clone(); - let helper = jobserver.into_helper_thread(move |token| { - drop(coordinator_send2.send(Box::new(Message::Token(token)))); - }).expect("failed to spawn helper thread"); - - let mut each_linked_rlib_for_lto = Vec::new(); - drop(link::each_linked_rlib(sess, crate_info, &mut |cnum, path| { - if link::ignored_for_lto(sess, crate_info, cnum) { - return - } - each_linked_rlib_for_lto.push((cnum, path.to_path_buf())); - })); - - let assembler_cmd = if modules_config.no_integrated_as { - // HACK: currently we use linker (gcc) as our assembler - let (linker, flavor) = link::linker_and_flavor(sess); - - let (name, mut cmd) = get_linker(sess, &linker, flavor); - cmd.args(&sess.target.target.options.asm_args); - Some(Arc::new(AssemblerCommand { - name, - cmd, - })) - } else { - None - }; - - let cgcx = CodegenContext { - crate_types: sess.crate_types.borrow().clone(), - each_linked_rlib_for_lto, - lto: sess.lto(), - no_landing_pads: sess.no_landing_pads(), - fewer_names: sess.fewer_names(), - save_temps: sess.opts.cg.save_temps, - opts: Arc::new(sess.opts.clone()), - time_passes: sess.time_passes(), - exported_symbols, - plugin_passes: sess.plugin_llvm_passes.borrow().clone(), - remark: sess.opts.cg.remark.clone(), - worker: 0, - incr_comp_session_dir: sess.incr_comp_session_dir_opt().map(|r| r.clone()), - cgu_reuse_tracker: sess.cgu_reuse_tracker.clone(), - coordinator_send, - diag_emitter: shared_emitter.clone(), - time_graph, - output_filenames: tcx.output_filenames(LOCAL_CRATE), - regular_module_config: modules_config, - metadata_module_config: metadata_config, - allocator_module_config: allocator_config, - tm_factory: target_machine_factory(tcx.sess, false), - total_cgus, - msvc_imps_needed: msvc_imps_needed(tcx), - target_pointer_width: tcx.sess.target.target.target_pointer_width.clone(), - debuginfo: tcx.sess.opts.debuginfo, - assembler_cmd, - }; - - // This is the "main loop" of parallel work happening for parallel codegen. - // It's here that we manage parallelism, schedule work, and work with - // messages coming from clients. - // - // There are a few environmental pre-conditions that shape how the system - // is set up: - // - // - Error reporting only can happen on the main thread because that's the - // only place where we have access to the compiler `Session`. - // - LLVM work can be done on any thread. - // - Codegen can only happen on the main thread. 
- // - Each thread doing substantial work must be in possession of a `Token`
- // from the `Jobserver`.
- // - The compiler process always holds one `Token`. Any additional `Tokens`
- // have to be requested from the `Jobserver`.
- //
- // Error Reporting
- // ===============
- // The error reporting restriction is handled separately from the rest: We
- // set up a `SharedEmitter` that holds an open channel to the main thread.
- // When an error occurs on any thread, the shared emitter will send the
- // error message to the main thread's receiver (`SharedEmitterMain`). The
- // main thread will periodically query this error message queue and emit
- // any error messages it has received. It might even abort compilation if
- // it has received a fatal error. In this case we rely on all other threads
- // being torn down automatically with the main thread.
- // Since the main thread will often be busy doing codegen work, error
- // reporting will be somewhat delayed, since the message queue can only be
- // checked in between two work packages.
- //
- // Work Processing Infrastructure
- // ==============================
- // The work processing infrastructure knows three major actors:
- //
- // - the coordinator thread,
- // - the main thread, and
- // - LLVM worker threads
- //
- // The coordinator thread is running a message loop. It instructs the main
- // thread about what work to do when, and it will spawn off LLVM worker
- // threads as open LLVM WorkItems become available.
- //
- // The job of the main thread is to codegen CGUs into LLVM work packages
- // (since the main thread is the only thread that can do this). The main
- // thread will block until it receives a message from the coordinator, upon
- // which it will codegen one CGU, send it to the coordinator and block
- // again. This way the coordinator can control what the main thread is
- // doing.
- //
- // The coordinator keeps a queue of LLVM WorkItems, and when a `Token` is
- // available, it will spawn off a new LLVM worker thread and let it process
- // that WorkItem. When an LLVM worker thread is done with its WorkItem,
- // it will just shut down, which also frees all resources associated with
- // the given LLVM module, and sends a message to the coordinator that the
- // WorkItem has been completed.
- //
- // Work Scheduling
- // ===============
- // The scheduler's goal is to minimize the time it takes to complete all
- // work there is. However, we also want to keep memory consumption low
- // if possible. These two goals are at odds with each other: If memory
- // consumption were not an issue, we could just let the main thread produce
- // LLVM WorkItems at full speed, assuring maximal utilization of
- // Tokens/LLVM worker threads. However, since codegen is usually faster
- // than LLVM processing, the queue of LLVM WorkItems would fill up and each
- // WorkItem potentially holds on to a substantial amount of memory.
- //
- // So the actual goal is to always produce just enough LLVM WorkItems so as
- // not to starve our LLVM worker threads. That means, once we have enough
- // WorkItems in our queue, we can block the main thread, so it does not
- // produce more until we need them.
- //
- // Doing LLVM Work on the Main Thread
- // ----------------------------------
- // Since the main thread owns the compiler process's implicit `Token`, it is
- // wasteful to keep it blocked without doing any work. Therefore, what we do
- // in this case is: We spawn off an additional LLVM worker thread that helps
- // reduce the queue. The work it is doing corresponds to the implicit
- // `Token`. The coordinator will mark the main thread as being busy with
- // LLVM work. (The actual work happens on another OS thread but we just care
- // about `Tokens`, not actual threads).
- //
- // When any LLVM worker thread finishes while the main thread is marked as
- // "busy with LLVM work", we can do a little switcheroo: We give the Token
- // of the just finished thread to the LLVM worker thread that is working on
- // behalf of the main thread's implicit Token, thus freeing up the main
- // thread again. The coordinator can then again decide what the main thread
- // should do. This allows the coordinator to make decisions at more points
- // in time.
- //
- // Striking a Balance between Throughput and Memory Consumption
- // ------------------------------------------------------------
- // Since our two goals, (1) use as many Tokens as possible and (2) keep
- // memory consumption as low as possible, are in conflict with each other,
- // we have to find a trade-off between them. Right now, the goal is to keep
- // all workers busy, which means that no worker should find the queue empty
- // when it is ready to start.
- // How do we achieve this? Good question :) We actually never know how
- // many `Tokens` are potentially available so it's hard to say how much to
- // fill up the queue before switching the main thread to LLVM work. Also we
- // currently don't have a means to estimate how long a running LLVM worker
- // will still be busy with its current WorkItem. However, we know the
- // maximal count of available Tokens that makes sense (=the number of CPU
- // cores), so we can take a conservative guess. The heuristic we use here
- // is implemented in the `queue_full_enough()` function.
- //
- // Some Background on Jobservers
- // -----------------------------
- // It's also worth touching on the management of parallelism here. We don't
- // want to just spawn a thread per work item because while that's optimal
- // parallelism it may overload a system with too many threads or violate our
- // configuration for the maximum amount of CPU to use for this process. To
- // manage this we use the `jobserver` crate.
- //
- // Job servers are an artifact of GNU make and are used to manage
- // parallelism between processes. A jobserver is basically a glorified IPC
- // semaphore. Whenever we want to run some work we acquire the semaphore,
- // and whenever we're done with that work we release the semaphore. In this
- // manner we can ensure that the maximum number of parallel workers is
- // capped at any one point in time.
- //
- // LTO and the coordinator thread
- // ------------------------------
- //
- // The final job the coordinator thread is responsible for is managing LTO
- // and how that works. When LTO is requested, what we'll do is collect all
- // optimized LLVM modules into a local vector on the coordinator. Once all
- // modules have been codegened and optimized we hand this to the `lto`
- // module for further optimization. The `lto` module will return a list
- // of more modules to work on, which the coordinator will continue to spawn
- // work for.
- //
- // Each LLVM module is automatically sent back to the coordinator for LTO if
- // necessary. There are already optimizations in place to avoid sending work
- // back to the coordinator if LTO isn't requested.
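
The comment block above describes the token-and-queue scheme in prose. The following is a minimal, self-contained sketch of just that idea, not the compiler's actual coordinator: a cost-sorted queue is drained by at most `max_tokens` worker threads, and completions come back to the coordinator over an `mpsc` channel. All names here (`WorkItem`, `Msg`, `run`, `max_tokens`) are illustrative.

```
// Illustrative sketch only: a cost-sorted queue of work items drained by at
// most `max_tokens` worker threads, with completions reported back to the
// coordinating loop over an mpsc channel.
use std::sync::mpsc::channel;
use std::thread;

struct WorkItem {
    name: String,
    cost: u64, // estimated processing cost, as in the real queue
}

enum Msg {
    Done { worker_id: usize },
}

fn run(mut queue: Vec<WorkItem>, max_tokens: usize) {
    // Keep the queue sorted by cost so the most expensive items are popped
    // (from the end) and started first.
    queue.sort_by_key(|w| w.cost);

    let (tx, rx) = channel::<Msg>();
    let mut running = 0;
    let mut next_worker_id = 0;

    loop {
        // Spawn work while we hold spare "tokens" and the queue is non-empty.
        while running < max_tokens {
            let item = match queue.pop() {
                Some(item) => item,
                None => break,
            };
            let tx = tx.clone();
            let worker_id = next_worker_id;
            next_worker_id += 1;
            running += 1;
            thread::spawn(move || {
                // Stand-in for running LLVM passes on one module.
                println!("worker {} optimizing {} (cost {})", worker_id, item.name, item.cost);
                let _ = tx.send(Msg::Done { worker_id });
            });
        }

        if running == 0 {
            break; // queue is empty and nothing is in flight
        }

        // Block until some worker finishes, which frees up a token.
        let Msg::Done { worker_id } = rx.recv().expect("a worker disconnected unexpectedly");
        println!("worker {} done, token freed", worker_id);
        running -= 1;
    }
}

fn main() {
    let queue = (0..8)
        .map(|i| WorkItem { name: format!("cgu-{}", i), cost: i % 3 })
        .collect();
    run(queue, 4);
}
```

The real coordinator additionally juggles the main thread's implicit token, the `queue_full_enough()` heuristic, LTO rounds, and error reporting, but the acquire-token/spawn/receive-completion loop has the same shape.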
- return thread::spawn(move || { - // We pretend to be within the top-level LLVM time-passes task here: - set_time_depth(1); - - let max_workers = ::num_cpus::get(); - let mut worker_id_counter = 0; - let mut free_worker_ids = Vec::new(); - let mut get_worker_id = |free_worker_ids: &mut Vec| { - if let Some(id) = free_worker_ids.pop() { - id - } else { - let id = worker_id_counter; - worker_id_counter += 1; - id - } - }; - - // This is where we collect codegen units that have gone all the way - // through codegen and LLVM. - let mut compiled_modules = vec![]; - let mut compiled_metadata_module = None; - let mut compiled_allocator_module = None; - let mut needs_lto = Vec::new(); - let mut lto_import_only_modules = Vec::new(); - let mut started_lto = false; - - // This flag tracks whether all items have gone through codegens - let mut codegen_done = false; - - // This is the queue of LLVM work items that still need processing. - let mut work_items = Vec::<(WorkItem, u64)>::new(); - - // This are the Jobserver Tokens we currently hold. Does not include - // the implicit Token the compiler process owns no matter what. - let mut tokens = Vec::new(); - - let mut main_thread_worker_state = MainThreadWorkerState::Idle; - let mut running = 0; - - let mut llvm_start_time = None; - - // Run the message loop while there's still anything that needs message - // processing: - while !codegen_done || - work_items.len() > 0 || - running > 0 || - needs_lto.len() > 0 || - lto_import_only_modules.len() > 0 || - main_thread_worker_state != MainThreadWorkerState::Idle { - - // While there are still CGUs to be codegened, the coordinator has - // to decide how to utilize the compiler processes implicit Token: - // For codegenning more CGU or for running them through LLVM. - if !codegen_done { - if main_thread_worker_state == MainThreadWorkerState::Idle { - if !queue_full_enough(work_items.len(), running, max_workers) { - // The queue is not full enough, codegen more items: - if let Err(_) = codegen_worker_send.send(Message::CodegenItem) { - panic!("Could not send Message::CodegenItem to main thread") - } - main_thread_worker_state = MainThreadWorkerState::Codegenning; - } else { - // The queue is full enough to not let the worker - // threads starve. Use the implicit Token to do some - // LLVM work too. - let (item, _) = work_items.pop() - .expect("queue empty - queue_full_enough() broken?"); - let cgcx = CodegenContext { - worker: get_worker_id(&mut free_worker_ids), - .. cgcx.clone() - }; - maybe_start_llvm_timer(cgcx.config(item.module_kind()), - &mut llvm_start_time); - main_thread_worker_state = MainThreadWorkerState::LLVMing; - spawn_work(cgcx, item); - } - } - } else { - // If we've finished everything related to normal codegen - // then it must be the case that we've got some LTO work to do. 
- // Perform the serial work here of figuring out what we're - // going to LTO and then push a bunch of work items onto our - // queue to do LTO - if work_items.len() == 0 && - running == 0 && - main_thread_worker_state == MainThreadWorkerState::Idle { - assert!(!started_lto); - assert!(needs_lto.len() + lto_import_only_modules.len() > 0); - started_lto = true; - let modules = mem::replace(&mut needs_lto, Vec::new()); - let import_only_modules = - mem::replace(&mut lto_import_only_modules, Vec::new()); - for (work, cost) in generate_lto_work(&cgcx, modules, import_only_modules) { - let insertion_index = work_items - .binary_search_by_key(&cost, |&(_, cost)| cost) - .unwrap_or_else(|e| e); - work_items.insert(insertion_index, (work, cost)); - if !cgcx.opts.debugging_opts.no_parallel_llvm { - helper.request_token(); - } - } - } - - // In this branch, we know that everything has been codegened, - // so it's just a matter of determining whether the implicit - // Token is free to use for LLVM work. - match main_thread_worker_state { - MainThreadWorkerState::Idle => { - if let Some((item, _)) = work_items.pop() { - let cgcx = CodegenContext { - worker: get_worker_id(&mut free_worker_ids), - .. cgcx.clone() - }; - maybe_start_llvm_timer(cgcx.config(item.module_kind()), - &mut llvm_start_time); - main_thread_worker_state = MainThreadWorkerState::LLVMing; - spawn_work(cgcx, item); - } else { - // There is no unstarted work, so let the main thread - // take over for a running worker. Otherwise the - // implicit token would just go to waste. - // We reduce the `running` counter by one. The - // `tokens.truncate()` below will take care of - // giving the Token back. - debug_assert!(running > 0); - running -= 1; - main_thread_worker_state = MainThreadWorkerState::LLVMing; - } - } - MainThreadWorkerState::Codegenning => { - bug!("codegen worker should not be codegenning after \ - codegen was already completed") - } - MainThreadWorkerState::LLVMing => { - // Already making good use of that token - } - } - } - - // Spin up what work we can, only doing this while we've got available - // parallelism slots and work left to spawn. - while work_items.len() > 0 && running < tokens.len() { - let (item, _) = work_items.pop().unwrap(); - - maybe_start_llvm_timer(cgcx.config(item.module_kind()), - &mut llvm_start_time); - - let cgcx = CodegenContext { - worker: get_worker_id(&mut free_worker_ids), - .. cgcx.clone() - }; - - spawn_work(cgcx, item); - running += 1; - } - - // Relinquish accidentally acquired extra tokens - tokens.truncate(running); - - let msg = coordinator_receive.recv().unwrap(); - match *msg.downcast::().ok().unwrap() { - // Save the token locally and the next turn of the loop will use - // this to spawn a new unit of work, or it may get dropped - // immediately if we have no more work to spawn. - Message::Token(token) => { - match token { - Ok(token) => { - tokens.push(token); - - if main_thread_worker_state == MainThreadWorkerState::LLVMing { - // If the main thread token is used for LLVM work - // at the moment, we turn that thread into a regular - // LLVM worker thread, so the main thread is free - // to react to codegen demand. 
- main_thread_worker_state = MainThreadWorkerState::Idle; - running += 1; - } - } - Err(e) => { - let msg = &format!("failed to acquire jobserver token: {}", e); - shared_emitter.fatal(msg); - // Exit the coordinator thread - panic!("{}", msg) - } - } - } - - Message::CodegenDone { llvm_work_item, cost } => { - // We keep the queue sorted by estimated processing cost, - // so that more expensive items are processed earlier. This - // is good for throughput as it gives the main thread more - // time to fill up the queue and it avoids scheduling - // expensive items to the end. - // Note, however, that this is not ideal for memory - // consumption, as LLVM module sizes are not evenly - // distributed. - let insertion_index = - work_items.binary_search_by_key(&cost, |&(_, cost)| cost); - let insertion_index = match insertion_index { - Ok(idx) | Err(idx) => idx - }; - work_items.insert(insertion_index, (llvm_work_item, cost)); - - if !cgcx.opts.debugging_opts.no_parallel_llvm { - helper.request_token(); - } - assert_eq!(main_thread_worker_state, - MainThreadWorkerState::Codegenning); - main_thread_worker_state = MainThreadWorkerState::Idle; - } - - Message::CodegenComplete => { - codegen_done = true; - assert_eq!(main_thread_worker_state, - MainThreadWorkerState::Codegenning); - main_thread_worker_state = MainThreadWorkerState::Idle; - } - - // If a thread exits successfully then we drop a token associated - // with that worker and update our `running` count. We may later - // re-acquire a token to continue running more work. We may also not - // actually drop a token here if the worker was running with an - // "ephemeral token" - // - // Note that if the thread failed that means it panicked, so we - // abort immediately. - Message::Done { result: Ok(compiled_module), worker_id } => { - if main_thread_worker_state == MainThreadWorkerState::LLVMing { - main_thread_worker_state = MainThreadWorkerState::Idle; - } else { - running -= 1; - } - - free_worker_ids.push(worker_id); - - match compiled_module.kind { - ModuleKind::Regular => { - compiled_modules.push(compiled_module); - } - ModuleKind::Metadata => { - assert!(compiled_metadata_module.is_none()); - compiled_metadata_module = Some(compiled_module); - } - ModuleKind::Allocator => { - assert!(compiled_allocator_module.is_none()); - compiled_allocator_module = Some(compiled_module); - } - } - } - Message::NeedsLTO { result, worker_id } => { - assert!(!started_lto); - if main_thread_worker_state == MainThreadWorkerState::LLVMing { - main_thread_worker_state = MainThreadWorkerState::Idle; - } else { - running -= 1; - } - free_worker_ids.push(worker_id); - needs_lto.push(result); - } - Message::AddImportOnlyModule { module_data, work_product } => { - assert!(!started_lto); - assert!(!codegen_done); - assert_eq!(main_thread_worker_state, - MainThreadWorkerState::Codegenning); - lto_import_only_modules.push((module_data, work_product)); - main_thread_worker_state = MainThreadWorkerState::Idle; - } - Message::Done { result: Err(()), worker_id: _ } => { - bug!("worker thread panicked"); - } - Message::CodegenItem => { - bug!("the coordinator should not receive codegen requests") - } - } - } - - if let Some(llvm_start_time) = llvm_start_time { - let total_llvm_time = Instant::now().duration_since(llvm_start_time); - // This is the top-level timing for all of LLVM, set the time-depth - // to zero. 
- set_time_depth(0); - print_time_passes_entry(cgcx.time_passes, - "LLVM passes", - total_llvm_time); - } - - // Regardless of what order these modules completed in, report them to - // the backend in the same order every time to ensure that we're handing - // out deterministic results. - compiled_modules.sort_by(|a, b| a.name.cmp(&b.name)); - - let compiled_metadata_module = compiled_metadata_module - .expect("Metadata module not compiled?"); - - Ok(CompiledModules { - modules: compiled_modules, - metadata_module: compiled_metadata_module, - allocator_module: compiled_allocator_module, - }) - }); - - // A heuristic that determines if we have enough LLVM WorkItems in the - // queue so that the main thread can do LLVM work instead of codegen - fn queue_full_enough(items_in_queue: usize, - workers_running: usize, - max_workers: usize) -> bool { - // Tune me, plz. - items_in_queue > 0 && - items_in_queue >= max_workers.saturating_sub(workers_running / 2) - } - - fn maybe_start_llvm_timer(config: &ModuleConfig, - llvm_start_time: &mut Option) { - // We keep track of the -Ztime-passes output manually, - // since the closure-based interface does not fit well here. - if config.time_passes { - if llvm_start_time.is_none() { - *llvm_start_time = Some(Instant::now()); - } - } - } -} - -pub const CODEGEN_WORKER_ID: usize = ::std::usize::MAX; -pub const CODEGEN_WORKER_TIMELINE: time_graph::TimelineId = - time_graph::TimelineId(CODEGEN_WORKER_ID); -pub const CODEGEN_WORK_PACKAGE_KIND: time_graph::WorkPackageKind = - time_graph::WorkPackageKind(&["#DE9597", "#FED1D3", "#FDC5C7", "#B46668", "#88494B"]); -const LLVM_WORK_PACKAGE_KIND: time_graph::WorkPackageKind = - time_graph::WorkPackageKind(&["#7DB67A", "#C6EEC4", "#ACDAAA", "#579354", "#3E6F3C"]); - -fn spawn_work(cgcx: CodegenContext, work: WorkItem) { - let depth = time_depth(); - - thread::spawn(move || { - set_time_depth(depth); - - // Set up a destructor which will fire off a message that we're done as - // we exit. - struct Bomb { - coordinator_send: Sender>, - result: Option, - worker_id: usize, - } - impl Drop for Bomb { - fn drop(&mut self) { - let worker_id = self.worker_id; - let msg = match self.result.take() { - Some(WorkItemResult::Compiled(m)) => { - Message::Done { result: Ok(m), worker_id } - } - Some(WorkItemResult::NeedsLTO(m)) => { - Message::NeedsLTO { result: m, worker_id } - } - None => Message::Done { result: Err(()), worker_id } - }; - drop(self.coordinator_send.send(Box::new(msg))); - } - } - - let mut bomb = Bomb { - coordinator_send: cgcx.coordinator_send.clone(), - result: None, - worker_id: cgcx.worker, - }; - - // Execute the work itself, and if it finishes successfully then flag - // ourselves as a success as well. - // - // Note that we ignore any `FatalError` coming out of `execute_work_item`, - // as a diagnostic was already sent off to the main thread - just - // surface that there was an error in this worker. 
- bomb.result = { - let timeline = cgcx.time_graph.as_ref().map(|tg| { - tg.start(time_graph::TimelineId(cgcx.worker), - LLVM_WORK_PACKAGE_KIND, - &work.name()) - }); - let mut timeline = timeline.unwrap_or(Timeline::noop()); - execute_work_item(&cgcx, work, &mut timeline).ok() - }; - }); -} - -pub fn run_assembler(cgcx: &CodegenContext, handler: &Handler, assembly: &Path, object: &Path) { - let assembler = cgcx.assembler_cmd - .as_ref() - .expect("cgcx.assembler_cmd is missing?"); - - let pname = &assembler.name; - let mut cmd = assembler.cmd.clone(); - cmd.arg("-c").arg("-o").arg(object).arg(assembly); - debug!("{:?}", cmd); - - match cmd.output() { - Ok(prog) => { - if !prog.status.success() { - let mut note = prog.stderr.clone(); - note.extend_from_slice(&prog.stdout); - - handler.struct_err(&format!("linking with `{}` failed: {}", - pname.display(), - prog.status)) - .note(&format!("{:?}", &cmd)) - .note(str::from_utf8(¬e[..]).unwrap()) - .emit(); - handler.abort_if_errors(); - } - }, - Err(e) => { - handler.err(&format!("could not exec the linker `{}`: {}", pname.display(), e)); - handler.abort_if_errors(); - } - } -} - pub unsafe fn with_llvm_pmb(llmod: &llvm::Module, config: &ModuleConfig, opt_level: llvm::CodeGenOptLevel, @@ -2190,7 +699,7 @@ pub unsafe fn with_llvm_pmb(llmod: &llvm::Module, // reasonable defaults and prepare it to actually populate the pass // manager. let builder = llvm::LLVMPassManagerBuilderCreate(); - let opt_size = config.opt_size.unwrap_or(llvm::CodeGenOptSizeNone); + let opt_size = config.opt_size.map(get_llvm_opt_size).unwrap_or(llvm::CodeGenOptSizeNone); let inline_threshold = config.inline_threshold; let pgo_gen_path = config.pgo_gen.as_ref().map(|s| { @@ -2258,277 +767,16 @@ pub unsafe fn with_llvm_pmb(llmod: &llvm::Module, llvm::LLVMPassManagerBuilderDispose(builder); } - -enum SharedEmitterMessage { - Diagnostic(Diagnostic), - InlineAsmError(u32, String), - AbortIfErrors, - Fatal(String), -} - -#[derive(Clone)] -pub struct SharedEmitter { - sender: Sender, -} - -pub struct SharedEmitterMain { - receiver: Receiver, -} - -impl SharedEmitter { - pub fn new() -> (SharedEmitter, SharedEmitterMain) { - let (sender, receiver) = channel(); - - (SharedEmitter { sender }, SharedEmitterMain { receiver }) - } - - fn inline_asm_error(&self, cookie: u32, msg: String) { - drop(self.sender.send(SharedEmitterMessage::InlineAsmError(cookie, msg))); - } - - fn fatal(&self, msg: &str) { - drop(self.sender.send(SharedEmitterMessage::Fatal(msg.to_string()))); - } -} - -impl Emitter for SharedEmitter { - fn emit(&mut self, db: &DiagnosticBuilder) { - drop(self.sender.send(SharedEmitterMessage::Diagnostic(Diagnostic { - msg: db.message(), - code: db.code.clone(), - lvl: db.level, - }))); - for child in &db.children { - drop(self.sender.send(SharedEmitterMessage::Diagnostic(Diagnostic { - msg: child.message(), - code: None, - lvl: child.level, - }))); - } - drop(self.sender.send(SharedEmitterMessage::AbortIfErrors)); - } -} - -impl SharedEmitterMain { - pub fn check(&self, sess: &Session, blocking: bool) { - loop { - let message = if blocking { - match self.receiver.recv() { - Ok(message) => Ok(message), - Err(_) => Err(()), - } - } else { - match self.receiver.try_recv() { - Ok(message) => Ok(message), - Err(_) => Err(()), - } - }; - - match message { - Ok(SharedEmitterMessage::Diagnostic(diag)) => { - let handler = sess.diagnostic(); - match diag.code { - Some(ref code) => { - handler.emit_with_code(&MultiSpan::new(), - &diag.msg, - code.clone(), - diag.lvl); - } - None => 
{ - handler.emit(&MultiSpan::new(), - &diag.msg, - diag.lvl); - } - } - } - Ok(SharedEmitterMessage::InlineAsmError(cookie, msg)) => { - match Mark::from_u32(cookie).expn_info() { - Some(ei) => sess.span_err(ei.call_site, &msg), - None => sess.err(&msg), - } - } - Ok(SharedEmitterMessage::AbortIfErrors) => { - sess.abort_if_errors(); - } - Ok(SharedEmitterMessage::Fatal(msg)) => { - sess.fatal(&msg); - } - Err(_) => { - break; - } - } - - } - } -} - -pub struct OngoingCodegen { - crate_name: Symbol, - crate_hash: Svh, - metadata: EncodedMetadata, - windows_subsystem: Option, - linker_info: LinkerInfo, - crate_info: CrateInfo, - time_graph: Option, - coordinator_send: Sender>, - codegen_worker_receive: Receiver, - shared_emitter_main: SharedEmitterMain, - future: thread::JoinHandle>, - output_filenames: Arc, -} - -impl OngoingCodegen { - pub(crate) fn join( - self, - sess: &Session - ) -> (CodegenResults, FxHashMap) { - self.shared_emitter_main.check(sess, true); - let compiled_modules = match self.future.join() { - Ok(Ok(compiled_modules)) => compiled_modules, - Ok(Err(())) => { - sess.abort_if_errors(); - panic!("expected abort due to worker thread errors") - }, - Err(_) => { - bug!("panic during codegen/LLVM phase"); - } - }; - - sess.cgu_reuse_tracker.check_expected_reuse(sess); - - sess.abort_if_errors(); - - if let Some(time_graph) = self.time_graph { - time_graph.dump(&format!("{}-timings", self.crate_name)); - } - - let work_products = - copy_all_cgu_workproducts_to_incr_comp_cache_dir(sess, - &compiled_modules); - produce_final_output_artifacts(sess, - &compiled_modules, - &self.output_filenames); - - // FIXME: time_llvm_passes support - does this use a global context or - // something? - if sess.codegen_units() == 1 && sess.time_llvm_passes() { - unsafe { llvm::LLVMRustPrintPassTimings(); } - } - - (CodegenResults { - crate_name: self.crate_name, - crate_hash: self.crate_hash, - metadata: self.metadata, - windows_subsystem: self.windows_subsystem, - linker_info: self.linker_info, - crate_info: self.crate_info, - - modules: compiled_modules.modules, - allocator_module: compiled_modules.allocator_module, - metadata_module: compiled_modules.metadata_module, - }, work_products) - } - - pub(crate) fn submit_pre_codegened_module_to_llvm(&self, - tcx: TyCtxt, - module: ModuleCodegen) { - self.wait_for_signal_to_codegen_item(); - self.check_for_errors(tcx.sess); - - // These are generally cheap and won't through off scheduling. - let cost = 0; - submit_codegened_module_to_llvm(tcx, module, cost); - } - - pub fn codegen_finished(&self, tcx: TyCtxt) { - self.wait_for_signal_to_codegen_item(); - self.check_for_errors(tcx.sess); - drop(self.coordinator_send.send(Box::new(Message::CodegenComplete))); - } - - pub fn check_for_errors(&self, sess: &Session) { - self.shared_emitter_main.check(sess, false); - } - - pub fn wait_for_signal_to_codegen_item(&self) { - match self.codegen_worker_receive.recv() { - Ok(Message::CodegenItem) => { - // Nothing to do - } - Ok(_) => panic!("unexpected message"), - Err(_) => { - // One of the LLVM threads must have panicked, fall through so - // error handling can be reached. 
- } - } - } -} - -pub(crate) fn submit_codegened_module_to_llvm(tcx: TyCtxt, - module: ModuleCodegen, - cost: u64) { - let llvm_work_item = WorkItem::Optimize(module); - drop(tcx.tx_to_llvm_workers.lock().send(Box::new(Message::CodegenDone { - llvm_work_item, - cost, - }))); -} - -pub(crate) fn submit_post_lto_module_to_llvm(tcx: TyCtxt, - module: CachedModuleCodegen) { - let llvm_work_item = WorkItem::CopyPostLtoArtifacts(module); - drop(tcx.tx_to_llvm_workers.lock().send(Box::new(Message::CodegenDone { - llvm_work_item, - cost: 0, - }))); -} - -pub(crate) fn submit_pre_lto_module_to_llvm(tcx: TyCtxt, - module: CachedModuleCodegen) { - let filename = pre_lto_bitcode_filename(&module.name); - let bc_path = in_incr_comp_dir_sess(tcx.sess, &filename); - let file = fs::File::open(&bc_path).unwrap_or_else(|e| { - panic!("failed to open bitcode file `{}`: {}", bc_path.display(), e) - }); - - let mmap = unsafe { - memmap::Mmap::map(&file).unwrap_or_else(|e| { - panic!("failed to mmap bitcode file `{}`: {}", bc_path.display(), e) - }) - }; - - // Schedule the module to be loaded - drop(tcx.tx_to_llvm_workers.lock().send(Box::new(Message::AddImportOnlyModule { - module_data: SerializedModule::FromUncompressedFile(mmap), - work_product: module.source, - }))); -} - -pub(super) fn pre_lto_bitcode_filename(module_name: &str) -> String { - format!("{}.{}", module_name, PRE_THIN_LTO_BC_EXT) -} - -fn msvc_imps_needed(tcx: TyCtxt) -> bool { - // This should never be true (because it's not supported). If it is true, - // something is wrong with commandline arg validation. - assert!(!(tcx.sess.opts.debugging_opts.cross_lang_lto.enabled() && - tcx.sess.target.target.options.is_like_msvc && - tcx.sess.opts.cg.prefer_dynamic)); - - tcx.sess.target.target.options.is_like_msvc && - tcx.sess.crate_types.borrow().iter().any(|ct| *ct == config::CrateType::Rlib) && - // ThinLTO can't handle this workaround in all cases, so we don't - // emit the `__imp_` symbols. Instead we make them unnecessary by disallowing - // dynamic linking when cross-language LTO is enabled. - !tcx.sess.opts.debugging_opts.cross_lang_lto.enabled() -} - // Create a `__imp_ = &symbol` global for every public static `symbol`. // This is required to satisfy `dllimport` references to static data in .rlibs // when using MSVC linker. We do this only for data, as linker can fix up // code references on its own. // See #26591, #27438 -fn create_msvc_imps(cgcx: &CodegenContext, llcx: &llvm::Context, llmod: &llvm::Module) { +fn create_msvc_imps( + cgcx: &CodegenContext, + llcx: &llvm::Context, + llmod: &llvm::Module +) { if !cgcx.msvc_imps_needed { return } diff --git a/src/librustc_codegen_llvm/base.rs b/src/librustc_codegen_llvm/base.rs index a9119d49e8..904e5d74f8 100644 --- a/src/librustc_codegen_llvm/base.rs +++ b/src/librustc_codegen_llvm/base.rs @@ -24,589 +24,39 @@ //! int) and rec(x=int, y=int, z=int) will have the same llvm::Type. 
use super::ModuleLlvm; -use super::ModuleCodegen; -use super::ModuleKind; -use super::CachedModuleCodegen; +use rustc_codegen_ssa::{ModuleCodegen, ModuleKind}; +use rustc_codegen_ssa::base::maybe_create_entry_wrapper; +use super::LlvmCodegenBackend; -use abi; -use back::write::{self, OngoingCodegen}; -use llvm::{self, TypeKind, get_param}; +use llvm; use metadata; -use rustc::dep_graph::cgu_reuse_tracker::CguReuse; -use rustc::hir::def_id::{CrateNum, DefId, LOCAL_CRATE}; -use rustc::middle::lang_items::StartFnLangItem; -use rustc::middle::weak_lang_items; -use rustc::mir::mono::{Linkage, Visibility, Stats, CodegenUnitNameBuilder}; +use rustc::mir::mono::{Linkage, Visibility, Stats}; use rustc::middle::cstore::{EncodedMetadata}; -use rustc::ty::{self, Ty, TyCtxt}; -use rustc::ty::layout::{self, Align, TyLayout, LayoutOf}; -use rustc::ty::query::Providers; -use rustc::middle::cstore::{self, LinkagePreference}; +use rustc::ty::TyCtxt; use rustc::middle::exported_symbols; -use rustc::util::common::{time, print_time_passes_entry}; -use rustc::util::profiling::ProfileCategory; -use rustc::session::config::{self, DebugInfo, EntryFnType, Lto}; -use rustc::session::Session; -use rustc_incremental; -use allocator; -use mir::place::PlaceRef; -use attributes; -use builder::{Builder, MemFlags}; -use callee; -use common::{C_bool, C_bytes_in_context, C_i32, C_usize}; -use rustc_mir::monomorphize::collector::{self, MonoItemCollectionMode}; -use rustc_mir::monomorphize::item::DefPathBasedNames; -use common::{self, C_struct_in_context, C_array, val_ty}; -use consts; +use rustc::session::config::{self, DebugInfo}; +use builder::Builder; +use common; use context::CodegenCx; -use debuginfo; -use declare; -use meth; -use mir; -use monomorphize::Instance; -use monomorphize::partitioning::{self, PartitioningStrategy, CodegenUnit, CodegenUnitExt}; -use rustc_codegen_utils::symbol_names_test; -use time_graph; -use mono_item::{MonoItem, BaseMonoItemExt, MonoItemExt}; -use type_::Type; -use type_of::LayoutLlvmExt; -use rustc::util::nodemap::{FxHashMap, DefIdSet}; -use CrateInfo; +use monomorphize::partitioning::CodegenUnitExt; +use rustc_codegen_ssa::mono_item::MonoItemExt; use rustc_data_structures::small_c_str::SmallCStr; -use rustc_data_structures::sync::Lrc; -use std::any::Any; +use rustc_codegen_ssa::traits::*; +use rustc_codegen_ssa::back::write::submit_codegened_module_to_llvm; + use std::ffi::CString; -use std::sync::Arc; -use std::time::{Instant, Duration}; -use std::i32; -use std::cmp; -use std::sync::mpsc; -use syntax_pos::Span; +use std::time::Instant; use syntax_pos::symbol::InternedString; -use syntax::attr; -use rustc::hir::{self, CodegenFnAttrs}; +use rustc::hir::CodegenFnAttrs; use value::Value; -use mir::operand::OperandValue; -use rustc_codegen_utils::check_for_rustc_errors_attr; - -pub struct StatRecorder<'a, 'll: 'a, 'tcx: 'll> { - cx: &'a CodegenCx<'ll, 'tcx>, - name: Option, - istart: usize, -} - -impl StatRecorder<'a, 'll, 'tcx> { - pub fn new(cx: &'a CodegenCx<'ll, 'tcx>, name: String) -> Self { - let istart = cx.stats.borrow().n_llvm_insns; - StatRecorder { - cx, - name: Some(name), - istart, - } - } -} - -impl Drop for StatRecorder<'a, 'll, 'tcx> { - fn drop(&mut self) { - if self.cx.sess().codegen_stats() { - let mut stats = self.cx.stats.borrow_mut(); - let iend = stats.n_llvm_insns; - stats.fn_stats.push((self.name.take().unwrap(), iend - self.istart)); - stats.n_fns += 1; - // Reset LLVM insn count to avoid compound costs. 
- stats.n_llvm_insns = self.istart; - } - } -} - -pub fn bin_op_to_icmp_predicate(op: hir::BinOpKind, - signed: bool) - -> llvm::IntPredicate { - match op { - hir::BinOpKind::Eq => llvm::IntEQ, - hir::BinOpKind::Ne => llvm::IntNE, - hir::BinOpKind::Lt => if signed { llvm::IntSLT } else { llvm::IntULT }, - hir::BinOpKind::Le => if signed { llvm::IntSLE } else { llvm::IntULE }, - hir::BinOpKind::Gt => if signed { llvm::IntSGT } else { llvm::IntUGT }, - hir::BinOpKind::Ge => if signed { llvm::IntSGE } else { llvm::IntUGE }, - op => { - bug!("comparison_op_to_icmp_predicate: expected comparison operator, \ - found {:?}", - op) - } - } -} - -pub fn bin_op_to_fcmp_predicate(op: hir::BinOpKind) -> llvm::RealPredicate { - match op { - hir::BinOpKind::Eq => llvm::RealOEQ, - hir::BinOpKind::Ne => llvm::RealUNE, - hir::BinOpKind::Lt => llvm::RealOLT, - hir::BinOpKind::Le => llvm::RealOLE, - hir::BinOpKind::Gt => llvm::RealOGT, - hir::BinOpKind::Ge => llvm::RealOGE, - op => { - bug!("comparison_op_to_fcmp_predicate: expected comparison operator, \ - found {:?}", - op); - } - } -} - -pub fn compare_simd_types( - bx: &Builder<'a, 'll, 'tcx>, - lhs: &'ll Value, - rhs: &'ll Value, - t: Ty<'tcx>, - ret_ty: &'ll Type, - op: hir::BinOpKind -) -> &'ll Value { - let signed = match t.sty { - ty::Float(_) => { - let cmp = bin_op_to_fcmp_predicate(op); - return bx.sext(bx.fcmp(cmp, lhs, rhs), ret_ty); - }, - ty::Uint(_) => false, - ty::Int(_) => true, - _ => bug!("compare_simd_types: invalid SIMD type"), - }; - - let cmp = bin_op_to_icmp_predicate(op, signed); - // LLVM outputs an `< size x i1 >`, so we need to perform a sign extension - // to get the correctly sized type. This will compile to a single instruction - // once the IR is converted to assembly if the SIMD instruction is supported - // by the target architecture. - bx.sext(bx.icmp(cmp, lhs, rhs), ret_ty) -} - -/// Retrieve the information we are losing (making dynamic) in an unsizing -/// adjustment. -/// -/// The `old_info` argument is a bit funny. It is intended for use -/// in an upcast, where the new vtable for an object will be derived -/// from the old one. -pub fn unsized_info( - cx: &CodegenCx<'ll, 'tcx>, - source: Ty<'tcx>, - target: Ty<'tcx>, - old_info: Option<&'ll Value>, -) -> &'ll Value { - let (source, target) = cx.tcx.struct_lockstep_tails(source, target); - match (&source.sty, &target.sty) { - (&ty::Array(_, len), &ty::Slice(_)) => { - C_usize(cx, len.unwrap_usize(cx.tcx)) - } - (&ty::Dynamic(..), &ty::Dynamic(..)) => { - // For now, upcasts are limited to changes in marker - // traits, and hence never actually require an actual - // change to the vtable. - old_info.expect("unsized_info: missing old info for trait upcast") - } - (_, &ty::Dynamic(ref data, ..)) => { - let vtable_ptr = cx.layout_of(cx.tcx.mk_mut_ptr(target)) - .field(cx, abi::FAT_PTR_EXTRA); - consts::ptrcast(meth::get_vtable(cx, source, data.principal()), - vtable_ptr.llvm_type(cx)) - } - _ => bug!("unsized_info: invalid unsizing {:?} -> {:?}", - source, - target), - } -} - -/// Coerce `src` to `dst_ty`. `src_ty` must be a thin pointer. -pub fn unsize_thin_ptr( - bx: &Builder<'a, 'll, 'tcx>, - src: &'ll Value, - src_ty: Ty<'tcx>, - dst_ty: Ty<'tcx> -) -> (&'ll Value, &'ll Value) { - debug!("unsize_thin_ptr: {:?} => {:?}", src_ty, dst_ty); - match (&src_ty.sty, &dst_ty.sty) { - (&ty::Ref(_, a, _), - &ty::Ref(_, b, _)) | - (&ty::Ref(_, a, _), - &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) | - (&ty::RawPtr(ty::TypeAndMut { ty: a, .. 
}), - &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) => { - assert!(bx.cx.type_is_sized(a)); - let ptr_ty = bx.cx.layout_of(b).llvm_type(bx.cx).ptr_to(); - (bx.pointercast(src, ptr_ty), unsized_info(bx.cx, a, b, None)) - } - (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) if def_a.is_box() && def_b.is_box() => { - let (a, b) = (src_ty.boxed_ty(), dst_ty.boxed_ty()); - assert!(bx.cx.type_is_sized(a)); - let ptr_ty = bx.cx.layout_of(b).llvm_type(bx.cx).ptr_to(); - (bx.pointercast(src, ptr_ty), unsized_info(bx.cx, a, b, None)) - } - (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => { - assert_eq!(def_a, def_b); - - let src_layout = bx.cx.layout_of(src_ty); - let dst_layout = bx.cx.layout_of(dst_ty); - let mut result = None; - for i in 0..src_layout.fields.count() { - let src_f = src_layout.field(bx.cx, i); - assert_eq!(src_layout.fields.offset(i).bytes(), 0); - assert_eq!(dst_layout.fields.offset(i).bytes(), 0); - if src_f.is_zst() { - continue; - } - assert_eq!(src_layout.size, src_f.size); - - let dst_f = dst_layout.field(bx.cx, i); - assert_ne!(src_f.ty, dst_f.ty); - assert_eq!(result, None); - result = Some(unsize_thin_ptr(bx, src, src_f.ty, dst_f.ty)); - } - let (lldata, llextra) = result.unwrap(); - // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types. - (bx.bitcast(lldata, dst_layout.scalar_pair_element_llvm_type(bx.cx, 0, true)), - bx.bitcast(llextra, dst_layout.scalar_pair_element_llvm_type(bx.cx, 1, true))) - } - _ => bug!("unsize_thin_ptr: called on bad types"), - } -} - -/// Coerce `src`, which is a reference to a value of type `src_ty`, -/// to a value of type `dst_ty` and store the result in `dst` -pub fn coerce_unsized_into( - bx: &Builder<'a, 'll, 'tcx>, - src: PlaceRef<'ll, 'tcx>, - dst: PlaceRef<'ll, 'tcx> -) { - let src_ty = src.layout.ty; - let dst_ty = dst.layout.ty; - let coerce_ptr = || { - let (base, info) = match src.load(bx).val { - OperandValue::Pair(base, info) => { - // fat-ptr to fat-ptr unsize preserves the vtable - // i.e. &'a fmt::Debug+Send => &'a fmt::Debug - // So we need to pointercast the base to ensure - // the types match up. - let thin_ptr = dst.layout.field(bx.cx, abi::FAT_PTR_ADDR); - (bx.pointercast(base, thin_ptr.llvm_type(bx.cx)), info) - } - OperandValue::Immediate(base) => { - unsize_thin_ptr(bx, base, src_ty, dst_ty) - } - OperandValue::Ref(..) 
=> bug!() - }; - OperandValue::Pair(base, info).store(bx, dst); - }; - match (&src_ty.sty, &dst_ty.sty) { - (&ty::Ref(..), &ty::Ref(..)) | - (&ty::Ref(..), &ty::RawPtr(..)) | - (&ty::RawPtr(..), &ty::RawPtr(..)) => { - coerce_ptr() - } - (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) if def_a.is_box() && def_b.is_box() => { - coerce_ptr() - } - - (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => { - assert_eq!(def_a, def_b); - - for i in 0..def_a.variants[0].fields.len() { - let src_f = src.project_field(bx, i); - let dst_f = dst.project_field(bx, i); - - if dst_f.layout.is_zst() { - continue; - } - - if src_f.layout.ty == dst_f.layout.ty { - memcpy_ty(bx, dst_f.llval, src_f.llval, src_f.layout, - src_f.align.min(dst_f.align), MemFlags::empty()); - } else { - coerce_unsized_into(bx, src_f, dst_f); - } - } - } - _ => bug!("coerce_unsized_into: invalid coercion {:?} -> {:?}", - src_ty, - dst_ty), - } -} - -pub fn cast_shift_expr_rhs( - cx: &Builder<'_, 'll, '_>, op: hir::BinOpKind, lhs: &'ll Value, rhs: &'ll Value -) -> &'ll Value { - cast_shift_rhs(op, lhs, rhs, |a, b| cx.trunc(a, b), |a, b| cx.zext(a, b)) -} - -fn cast_shift_rhs<'ll, F, G>(op: hir::BinOpKind, - lhs: &'ll Value, - rhs: &'ll Value, - trunc: F, - zext: G) - -> &'ll Value - where F: FnOnce(&'ll Value, &'ll Type) -> &'ll Value, - G: FnOnce(&'ll Value, &'ll Type) -> &'ll Value -{ - // Shifts may have any size int on the rhs - if op.is_shift() { - let mut rhs_llty = val_ty(rhs); - let mut lhs_llty = val_ty(lhs); - if rhs_llty.kind() == TypeKind::Vector { - rhs_llty = rhs_llty.element_type() - } - if lhs_llty.kind() == TypeKind::Vector { - lhs_llty = lhs_llty.element_type() - } - let rhs_sz = rhs_llty.int_width(); - let lhs_sz = lhs_llty.int_width(); - if lhs_sz < rhs_sz { - trunc(rhs, lhs_llty) - } else if lhs_sz > rhs_sz { - // FIXME (#1877: If in the future shifting by negative - // values is no longer undefined then this is wrong. - zext(rhs, lhs_llty) - } else { - rhs - } - } else { - rhs - } -} - -/// Returns whether this session's target will use SEH-based unwinding. -/// -/// This is only true for MSVC targets, and even then the 64-bit MSVC target -/// currently uses SEH-ish unwinding with DWARF info tables to the side (same as -/// 64-bit MinGW) instead of "full SEH". -pub fn wants_msvc_seh(sess: &Session) -> bool { - sess.target.target.options.is_like_msvc -} - -pub fn call_assume(bx: &Builder<'_, 'll, '_>, val: &'ll Value) { - let assume_intrinsic = bx.cx.get_intrinsic("llvm.assume"); - bx.call(assume_intrinsic, &[val], None); -} - -pub fn from_immediate(bx: &Builder<'_, 'll, '_>, val: &'ll Value) -> &'ll Value { - if val_ty(val) == Type::i1(bx.cx) { - bx.zext(val, Type::i8(bx.cx)) - } else { - val - } -} - -pub fn to_immediate( - bx: &Builder<'_, 'll, '_>, - val: &'ll Value, - layout: layout::TyLayout, -) -> &'ll Value { - if let layout::Abi::Scalar(ref scalar) = layout.abi { - return to_immediate_scalar(bx, val, scalar); - } - val -} - -pub fn to_immediate_scalar( - bx: &Builder<'_, 'll, '_>, - val: &'ll Value, - scalar: &layout::Scalar, -) -> &'ll Value { - if scalar.is_bool() { - return bx.trunc(val, Type::i1(bx.cx)); - } - val -} - -pub fn call_memcpy( - bx: &Builder<'_, 'll, '_>, - dst: &'ll Value, - src: &'ll Value, - n_bytes: &'ll Value, - align: Align, - flags: MemFlags, -) { - if flags.contains(MemFlags::NONTEMPORAL) { - // HACK(nox): This is inefficient but there is no nontemporal memcpy. 
- let val = bx.load(src, align); - let ptr = bx.pointercast(dst, val_ty(val).ptr_to()); - bx.store_with_flags(val, ptr, align, flags); - return; - } - let cx = bx.cx; - let ptr_width = &cx.sess().target.target.target_pointer_width; - let key = format!("llvm.memcpy.p0i8.p0i8.i{}", ptr_width); - let memcpy = cx.get_intrinsic(&key); - let src_ptr = bx.pointercast(src, Type::i8p(cx)); - let dst_ptr = bx.pointercast(dst, Type::i8p(cx)); - let size = bx.intcast(n_bytes, cx.isize_ty, false); - let align = C_i32(cx, align.abi() as i32); - let volatile = C_bool(cx, flags.contains(MemFlags::VOLATILE)); - bx.call(memcpy, &[dst_ptr, src_ptr, size, align, volatile], None); -} - -pub fn memcpy_ty( - bx: &Builder<'_, 'll, 'tcx>, - dst: &'ll Value, - src: &'ll Value, - layout: TyLayout<'tcx>, - align: Align, - flags: MemFlags, -) { - let size = layout.size.bytes(); - if size == 0 { - return; - } - - call_memcpy(bx, dst, src, C_usize(bx.cx, size), align, flags); -} - -pub fn call_memset( - bx: &Builder<'_, 'll, '_>, - ptr: &'ll Value, - fill_byte: &'ll Value, - size: &'ll Value, - align: &'ll Value, - volatile: bool, -) -> &'ll Value { - let ptr_width = &bx.cx.sess().target.target.target_pointer_width; - let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width); - let llintrinsicfn = bx.cx.get_intrinsic(&intrinsic_key); - let volatile = C_bool(bx.cx, volatile); - bx.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None) -} - -pub fn codegen_instance<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, instance: Instance<'tcx>) { - let _s = if cx.sess().codegen_stats() { - let mut instance_name = String::new(); - DefPathBasedNames::new(cx.tcx, true, true) - .push_def_path(instance.def_id(), &mut instance_name); - Some(StatRecorder::new(cx, instance_name)) - } else { - None - }; - - // this is an info! to allow collecting monomorphization statistics - // and to allow finding the last function before LLVM aborts from - // release builds. - info!("codegen_instance({})", instance); - - let fn_ty = instance.ty(cx.tcx); - let sig = common::ty_fn_sig(cx, fn_ty); - let sig = cx.tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig); - - let lldecl = cx.instances.borrow().get(&instance).cloned().unwrap_or_else(|| - bug!("Instance `{:?}` not already declared", instance)); - - cx.stats.borrow_mut().n_closures += 1; - - let mir = cx.tcx.instance_mir(instance.def); - mir::codegen_mir(cx, lldecl, &mir, instance, sig); -} - -pub fn set_link_section(llval: &Value, attrs: &CodegenFnAttrs) { - let sect = match attrs.link_section { - Some(name) => name, - None => return, - }; - unsafe { - let buf = SmallCStr::new(§.as_str()); - llvm::LLVMSetSection(llval, buf.as_ptr()); - } -} - -/// Create the `main` function which will initialize the rust runtime and call -/// users main function. -fn maybe_create_entry_wrapper(cx: &CodegenCx) { - let (main_def_id, span) = match *cx.sess().entry_fn.borrow() { - Some((id, span, _)) => { - (cx.tcx.hir.local_def_id(id), span) - } - None => return, - }; - - let instance = Instance::mono(cx.tcx, main_def_id); - - if !cx.codegen_unit.contains_item(&MonoItem::Fn(instance)) { - // We want to create the wrapper in the same codegen unit as Rust's main - // function. 
- return; - } - - let main_llfn = callee::get_fn(cx, instance); - - let et = cx.sess().entry_fn.get().map(|e| e.2); - match et { - Some(EntryFnType::Main) => create_entry_fn(cx, span, main_llfn, main_def_id, true), - Some(EntryFnType::Start) => create_entry_fn(cx, span, main_llfn, main_def_id, false), - None => {} // Do nothing. - } - - fn create_entry_fn( - cx: &CodegenCx<'ll, '_>, - sp: Span, - rust_main: &'ll Value, - rust_main_def_id: DefId, - use_start_lang_item: bool, - ) { - let llfty = Type::func(&[Type::c_int(cx), Type::i8p(cx).ptr_to()], Type::c_int(cx)); - - let main_ret_ty = cx.tcx.fn_sig(rust_main_def_id).output(); - // Given that `main()` has no arguments, - // then its return type cannot have - // late-bound regions, since late-bound - // regions must appear in the argument - // listing. - let main_ret_ty = cx.tcx.erase_regions( - &main_ret_ty.no_late_bound_regions().unwrap(), - ); - - if declare::get_defined_value(cx, "main").is_some() { - // FIXME: We should be smart and show a better diagnostic here. - cx.sess().struct_span_err(sp, "entry symbol `main` defined multiple times") - .help("did you use #[no_mangle] on `fn main`? Use #[start] instead") - .emit(); - cx.sess().abort_if_errors(); - bug!(); - } - let llfn = declare::declare_cfn(cx, "main", llfty); - - // `main` should respect same config for frame pointer elimination as rest of code - attributes::set_frame_pointer_elimination(cx, llfn); - attributes::apply_target_cpu_attr(cx, llfn); - - let bx = Builder::new_block(cx, llfn, "top"); - - debuginfo::gdb::insert_reference_to_gdb_debug_scripts_section_global(&bx); - - // Params from native main() used as args for rust start function - let param_argc = get_param(llfn, 0); - let param_argv = get_param(llfn, 1); - let arg_argc = bx.intcast(param_argc, cx.isize_ty, true); - let arg_argv = param_argv; - - let (start_fn, args) = if use_start_lang_item { - let start_def_id = cx.tcx.require_lang_item(StartFnLangItem); - let start_fn = callee::resolve_and_get_fn( - cx, - start_def_id, - cx.tcx.intern_substs(&[main_ret_ty.into()]), - ); - (start_fn, vec![bx.pointercast(rust_main, Type::i8p(cx).ptr_to()), - arg_argc, arg_argv]) - } else { - debug!("using user-defined start fn"); - (rust_main, vec![arg_argc, arg_argv]) - }; - - let result = bx.call(start_fn, &args, None); - bx.ret(bx.intcast(result, Type::c_int(cx), true)); - } -} - -fn write_metadata<'a, 'gcx>(tcx: TyCtxt<'a, 'gcx, 'gcx>, - llvm_module: &ModuleLlvm) - -> EncodedMetadata { +pub fn write_metadata<'a, 'gcx>( + tcx: TyCtxt<'a, 'gcx, 'gcx>, + llvm_module: &ModuleLlvm +) -> EncodedMetadata { use std::io::Write; use flate2::Compression; use flate2::write::DeflateEncoder; @@ -647,12 +97,12 @@ fn write_metadata<'a, 'gcx>(tcx: TyCtxt<'a, 'gcx, 'gcx>, DeflateEncoder::new(&mut compressed, Compression::fast()) .write_all(&metadata.raw_data).unwrap(); - let llmeta = C_bytes_in_context(metadata_llcx, &compressed); - let llconst = C_struct_in_context(metadata_llcx, &[llmeta], false); + let llmeta = common::bytes_in_context(metadata_llcx, &compressed); + let llconst = common::struct_in_context(metadata_llcx, &[llmeta], false); let name = exported_symbols::metadata_symbol_name(tcx); let buf = CString::new(name).unwrap(); let llglobal = unsafe { - llvm::LLVMAddGlobal(metadata_llmod, val_ty(llconst), buf.as_ptr()) + llvm::LLVMAddGlobal(metadata_llmod, common::val_ty(llconst), buf.as_ptr()) }; unsafe { llvm::LLVMSetInitializer(llglobal, llconst); @@ -696,491 +146,7 @@ pub fn iter_globals(llmod: &'ll llvm::Module) -> ValueIter<'ll> { } 
} -fn determine_cgu_reuse<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - cgu: &CodegenUnit<'tcx>) - -> CguReuse { - if !tcx.dep_graph.is_fully_enabled() { - return CguReuse::No - } - - let work_product_id = &cgu.work_product_id(); - if tcx.dep_graph.previous_work_product(work_product_id).is_none() { - // We don't have anything cached for this CGU. This can happen - // if the CGU did not exist in the previous session. - return CguReuse::No - } - - // Try to mark the CGU as green. If it we can do so, it means that nothing - // affecting the LLVM module has changed and we can re-use a cached version. - // If we compile with any kind of LTO, this means we can re-use the bitcode - // of the Pre-LTO stage (possibly also the Post-LTO version but we'll only - // know that later). If we are not doing LTO, there is only one optimized - // version of each module, so we re-use that. - let dep_node = cgu.codegen_dep_node(tcx); - assert!(!tcx.dep_graph.dep_node_exists(&dep_node), - "CompileCodegenUnit dep-node for CGU `{}` already exists before marking.", - cgu.name()); - - if tcx.dep_graph.try_mark_green(tcx, &dep_node).is_some() { - // We can re-use either the pre- or the post-thinlto state - if tcx.sess.lto() != Lto::No { - CguReuse::PreLto - } else { - CguReuse::PostLto - } - } else { - CguReuse::No - } -} - -pub fn codegen_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - rx: mpsc::Receiver>) - -> OngoingCodegen -{ - check_for_rustc_errors_attr(tcx); - - if let Some(true) = tcx.sess.opts.debugging_opts.thinlto { - if unsafe { !llvm::LLVMRustThinLTOAvailable() } { - tcx.sess.fatal("this compiler's LLVM does not support ThinLTO"); - } - } - - if (tcx.sess.opts.debugging_opts.pgo_gen.is_some() || - !tcx.sess.opts.debugging_opts.pgo_use.is_empty()) && - unsafe { !llvm::LLVMRustPGOAvailable() } - { - tcx.sess.fatal("this compiler's LLVM does not support PGO"); - } - - let cgu_name_builder = &mut CodegenUnitNameBuilder::new(tcx); - - // Codegen the metadata. - tcx.sess.profiler(|p| p.start_activity(ProfileCategory::Codegen)); - - let metadata_cgu_name = cgu_name_builder.build_cgu_name(LOCAL_CRATE, - &["crate"], - Some("metadata")).as_str() - .to_string(); - let metadata_llvm_module = ModuleLlvm::new(tcx.sess, &metadata_cgu_name); - let metadata = time(tcx.sess, "write metadata", || { - write_metadata(tcx, &metadata_llvm_module) - }); - tcx.sess.profiler(|p| p.end_activity(ProfileCategory::Codegen)); - - let metadata_module = ModuleCodegen { - name: metadata_cgu_name, - module_llvm: metadata_llvm_module, - kind: ModuleKind::Metadata, - }; - - let time_graph = if tcx.sess.opts.debugging_opts.codegen_time_graph { - Some(time_graph::TimeGraph::new()) - } else { - None - }; - - // Skip crate items and just output metadata in -Z no-codegen mode. - if tcx.sess.opts.debugging_opts.no_codegen || - !tcx.sess.opts.output_types.should_codegen() { - let ongoing_codegen = write::start_async_codegen( - tcx, - time_graph, - metadata, - rx, - 1); - - ongoing_codegen.submit_pre_codegened_module_to_llvm(tcx, metadata_module); - ongoing_codegen.codegen_finished(tcx); - - assert_and_save_dep_graph(tcx); - - ongoing_codegen.check_for_errors(tcx.sess); - - return ongoing_codegen; - } - - // Run the monomorphization collector and partition the collected items into - // codegen units. - let codegen_units = tcx.collect_and_partition_mono_items(LOCAL_CRATE).1; - let codegen_units = (*codegen_units).clone(); - - // Force all codegen_unit queries so they are already either red or green - // when compile_codegen_unit accesses them. 
We are not able to re-execute - // the codegen_unit query from just the DepNode, so an unknown color would - // lead to having to re-execute compile_codegen_unit, possibly - // unnecessarily. - if tcx.dep_graph.is_fully_enabled() { - for cgu in &codegen_units { - tcx.codegen_unit(cgu.name().clone()); - } - } - - let ongoing_codegen = write::start_async_codegen( - tcx, - time_graph.clone(), - metadata, - rx, - codegen_units.len()); - - // Codegen an allocator shim, if necessary. - // - // If the crate doesn't have an `allocator_kind` set then there's definitely - // no shim to generate. Otherwise we also check our dependency graph for all - // our output crate types. If anything there looks like its a `Dynamic` - // linkage, then it's already got an allocator shim and we'll be using that - // one instead. If nothing exists then it's our job to generate the - // allocator! - let any_dynamic_crate = tcx.sess.dependency_formats.borrow() - .iter() - .any(|(_, list)| { - use rustc::middle::dependency_format::Linkage; - list.iter().any(|&linkage| linkage == Linkage::Dynamic) - }); - let allocator_module = if any_dynamic_crate { - None - } else if let Some(kind) = *tcx.sess.allocator_kind.get() { - let llmod_id = cgu_name_builder.build_cgu_name(LOCAL_CRATE, - &["crate"], - Some("allocator")).as_str() - .to_string(); - let modules = ModuleLlvm::new(tcx.sess, &llmod_id); - time(tcx.sess, "write allocator module", || { - unsafe { - allocator::codegen(tcx, &modules, kind) - } - }); - - Some(ModuleCodegen { - name: llmod_id, - module_llvm: modules, - kind: ModuleKind::Allocator, - }) - } else { - None - }; - - if let Some(allocator_module) = allocator_module { - ongoing_codegen.submit_pre_codegened_module_to_llvm(tcx, allocator_module); - } - - ongoing_codegen.submit_pre_codegened_module_to_llvm(tcx, metadata_module); - - // We sort the codegen units by size. This way we can schedule work for LLVM - // a bit more efficiently. - let codegen_units = { - let mut codegen_units = codegen_units; - codegen_units.sort_by_cached_key(|cgu| cmp::Reverse(cgu.size_estimate())); - codegen_units - }; - - let mut total_codegen_time = Duration::new(0, 0); - let mut all_stats = Stats::default(); - - for cgu in codegen_units.into_iter() { - ongoing_codegen.wait_for_signal_to_codegen_item(); - ongoing_codegen.check_for_errors(tcx.sess); - - let cgu_reuse = determine_cgu_reuse(tcx, &cgu); - tcx.sess.cgu_reuse_tracker.set_actual_reuse(&cgu.name().as_str(), cgu_reuse); - - match cgu_reuse { - CguReuse::No => { - let _timing_guard = time_graph.as_ref().map(|time_graph| { - time_graph.start(write::CODEGEN_WORKER_TIMELINE, - write::CODEGEN_WORK_PACKAGE_KIND, - &format!("codegen {}", cgu.name())) - }); - let start_time = Instant::now(); - let stats = compile_codegen_unit(tcx, *cgu.name()); - all_stats.extend(stats); - total_codegen_time += start_time.elapsed(); - false - } - CguReuse::PreLto => { - write::submit_pre_lto_module_to_llvm(tcx, CachedModuleCodegen { - name: cgu.name().to_string(), - source: cgu.work_product(tcx), - }); - true - } - CguReuse::PostLto => { - write::submit_post_lto_module_to_llvm(tcx, CachedModuleCodegen { - name: cgu.name().to_string(), - source: cgu.work_product(tcx), - }); - true - } - }; - } - - ongoing_codegen.codegen_finished(tcx); - - // Since the main thread is sometimes blocked during codegen, we keep track - // -Ztime-passes output manually. 
- print_time_passes_entry(tcx.sess.time_passes(), - "codegen to LLVM IR", - total_codegen_time); - - rustc_incremental::assert_module_sources::assert_module_sources(tcx); - - symbol_names_test::report_symbol_names(tcx); - - if tcx.sess.codegen_stats() { - println!("--- codegen stats ---"); - println!("n_glues_created: {}", all_stats.n_glues_created); - println!("n_null_glues: {}", all_stats.n_null_glues); - println!("n_real_glues: {}", all_stats.n_real_glues); - - println!("n_fns: {}", all_stats.n_fns); - println!("n_inlines: {}", all_stats.n_inlines); - println!("n_closures: {}", all_stats.n_closures); - println!("fn stats:"); - all_stats.fn_stats.sort_by_key(|&(_, insns)| insns); - for &(ref name, insns) in all_stats.fn_stats.iter() { - println!("{} insns, {}", insns, *name); - } - } - - if tcx.sess.count_llvm_insns() { - for (k, v) in all_stats.llvm_insns.iter() { - println!("{:7} {}", *v, *k); - } - } - - ongoing_codegen.check_for_errors(tcx.sess); - - assert_and_save_dep_graph(tcx); - ongoing_codegen -} - -fn assert_and_save_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { - time(tcx.sess, - "assert dep graph", - || rustc_incremental::assert_dep_graph(tcx)); - - time(tcx.sess, - "serialize dep graph", - || rustc_incremental::save_dep_graph(tcx)); -} - -fn collect_and_partition_mono_items<'a, 'tcx>( - tcx: TyCtxt<'a, 'tcx, 'tcx>, - cnum: CrateNum, -) -> (Arc, Arc>>>) -{ - assert_eq!(cnum, LOCAL_CRATE); - - let collection_mode = match tcx.sess.opts.debugging_opts.print_mono_items { - Some(ref s) => { - let mode_string = s.to_lowercase(); - let mode_string = mode_string.trim(); - if mode_string == "eager" { - MonoItemCollectionMode::Eager - } else { - if mode_string != "lazy" { - let message = format!("Unknown codegen-item collection mode '{}'. 
\ - Falling back to 'lazy' mode.", - mode_string); - tcx.sess.warn(&message); - } - - MonoItemCollectionMode::Lazy - } - } - None => { - if tcx.sess.opts.cg.link_dead_code { - MonoItemCollectionMode::Eager - } else { - MonoItemCollectionMode::Lazy - } - } - }; - - let (items, inlining_map) = - time(tcx.sess, "monomorphization collection", || { - collector::collect_crate_mono_items(tcx, collection_mode) - }); - - tcx.sess.abort_if_errors(); - - ::rustc_mir::monomorphize::assert_symbols_are_distinct(tcx, items.iter()); - - let strategy = if tcx.sess.opts.incremental.is_some() { - PartitioningStrategy::PerModule - } else { - PartitioningStrategy::FixedUnitCount(tcx.sess.codegen_units()) - }; - - let codegen_units = time(tcx.sess, "codegen unit partitioning", || { - partitioning::partition(tcx, - items.iter().cloned(), - strategy, - &inlining_map) - .into_iter() - .map(Arc::new) - .collect::>() - }); - - let mono_items: DefIdSet = items.iter().filter_map(|mono_item| { - match *mono_item { - MonoItem::Fn(ref instance) => Some(instance.def_id()), - MonoItem::Static(def_id) => Some(def_id), - _ => None, - } - }).collect(); - - if tcx.sess.opts.debugging_opts.print_mono_items.is_some() { - let mut item_to_cgus: FxHashMap<_, Vec<_>> = Default::default(); - - for cgu in &codegen_units { - for (&mono_item, &linkage) in cgu.items() { - item_to_cgus.entry(mono_item) - .or_default() - .push((cgu.name().clone(), linkage)); - } - } - - let mut item_keys: Vec<_> = items - .iter() - .map(|i| { - let mut output = i.to_string(tcx); - output.push_str(" @@"); - let mut empty = Vec::new(); - let cgus = item_to_cgus.get_mut(i).unwrap_or(&mut empty); - cgus.as_mut_slice().sort_by_key(|&(ref name, _)| name.clone()); - cgus.dedup(); - for &(ref cgu_name, (linkage, _)) in cgus.iter() { - output.push_str(" "); - output.push_str(&cgu_name.as_str()); - - let linkage_abbrev = match linkage { - Linkage::External => "External", - Linkage::AvailableExternally => "Available", - Linkage::LinkOnceAny => "OnceAny", - Linkage::LinkOnceODR => "OnceODR", - Linkage::WeakAny => "WeakAny", - Linkage::WeakODR => "WeakODR", - Linkage::Appending => "Appending", - Linkage::Internal => "Internal", - Linkage::Private => "Private", - Linkage::ExternalWeak => "ExternalWeak", - Linkage::Common => "Common", - }; - - output.push_str("["); - output.push_str(linkage_abbrev); - output.push_str("]"); - } - output - }) - .collect(); - - item_keys.sort(); - - for item in item_keys { - println!("MONO_ITEM {}", item); - } - } - - (Arc::new(mono_items), Arc::new(codegen_units)) -} - -impl CrateInfo { - pub fn new(tcx: TyCtxt) -> CrateInfo { - let mut info = CrateInfo { - panic_runtime: None, - compiler_builtins: None, - profiler_runtime: None, - sanitizer_runtime: None, - is_no_builtins: Default::default(), - native_libraries: Default::default(), - used_libraries: tcx.native_libraries(LOCAL_CRATE), - link_args: tcx.link_args(LOCAL_CRATE), - crate_name: Default::default(), - used_crates_dynamic: cstore::used_crates(tcx, LinkagePreference::RequireDynamic), - used_crates_static: cstore::used_crates(tcx, LinkagePreference::RequireStatic), - used_crate_source: Default::default(), - wasm_imports: Default::default(), - lang_item_to_crate: Default::default(), - missing_lang_items: Default::default(), - }; - let lang_items = tcx.lang_items(); - - let load_wasm_items = tcx.sess.crate_types.borrow() - .iter() - .any(|c| *c != config::CrateType::Rlib) && - tcx.sess.opts.target_triple.triple() == "wasm32-unknown-unknown"; - - if load_wasm_items { - 
info.load_wasm_imports(tcx, LOCAL_CRATE); - } - - let crates = tcx.crates(); - - let n_crates = crates.len(); - info.native_libraries.reserve(n_crates); - info.crate_name.reserve(n_crates); - info.used_crate_source.reserve(n_crates); - info.missing_lang_items.reserve(n_crates); - - for &cnum in crates.iter() { - info.native_libraries.insert(cnum, tcx.native_libraries(cnum)); - info.crate_name.insert(cnum, tcx.crate_name(cnum).to_string()); - info.used_crate_source.insert(cnum, tcx.used_crate_source(cnum)); - if tcx.is_panic_runtime(cnum) { - info.panic_runtime = Some(cnum); - } - if tcx.is_compiler_builtins(cnum) { - info.compiler_builtins = Some(cnum); - } - if tcx.is_profiler_runtime(cnum) { - info.profiler_runtime = Some(cnum); - } - if tcx.is_sanitizer_runtime(cnum) { - info.sanitizer_runtime = Some(cnum); - } - if tcx.is_no_builtins(cnum) { - info.is_no_builtins.insert(cnum); - } - if load_wasm_items { - info.load_wasm_imports(tcx, cnum); - } - let missing = tcx.missing_lang_items(cnum); - for &item in missing.iter() { - if let Ok(id) = lang_items.require(item) { - info.lang_item_to_crate.insert(item, id.krate); - } - } - - // No need to look for lang items that are whitelisted and don't - // actually need to exist. - let missing = missing.iter() - .cloned() - .filter(|&l| !weak_lang_items::whitelisted(tcx, l)) - .collect(); - info.missing_lang_items.insert(cnum, missing); - } - - return info - } - - fn load_wasm_imports(&mut self, tcx: TyCtxt, cnum: CrateNum) { - self.wasm_imports.extend(tcx.wasm_import_module_map(cnum).iter().map(|(&id, module)| { - let instance = Instance::mono(tcx, id); - let import_name = tcx.symbol_name(instance); - - (import_name.to_string(), module.clone()) - })); - } -} - -fn is_codegened_item(tcx: TyCtxt, id: DefId) -> bool { - let (all_mono_items, _) = - tcx.collect_and_partition_mono_items(LOCAL_CRATE); - all_mono_items.contains(&id) -} - -fn compile_codegen_unit<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, +pub fn compile_codegen_unit<'ll, 'tcx>(tcx: TyCtxt<'ll, 'tcx, 'tcx>, cgu_name: InternedString) -> Stats { let start_time = Instant::now(); @@ -1197,41 +163,39 @@ fn compile_codegen_unit<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let cost = time_to_codegen.as_secs() * 1_000_000_000 + time_to_codegen.subsec_nanos() as u64; - write::submit_codegened_module_to_llvm(tcx, - module, - cost); + submit_codegened_module_to_llvm(&LlvmCodegenBackend(()), tcx, module, cost); return stats; - fn module_codegen<'a, 'tcx>( - tcx: TyCtxt<'a, 'tcx, 'tcx>, + fn module_codegen<'ll, 'tcx>( + tcx: TyCtxt<'ll, 'tcx, 'tcx>, cgu_name: InternedString) - -> (Stats, ModuleCodegen) + -> (Stats, ModuleCodegen) { + let backend = LlvmCodegenBackend(()); let cgu = tcx.codegen_unit(cgu_name); - // Instantiate monomorphizations without filling out definitions yet... - let llvm_module = ModuleLlvm::new(tcx.sess, &cgu_name.as_str()); + let llvm_module = backend.new_metadata(tcx.sess, &cgu_name.as_str()); let stats = { let cx = CodegenCx::new(tcx, cgu, &llvm_module); let mono_items = cx.codegen_unit .items_in_deterministic_order(cx.tcx); for &(mono_item, (linkage, visibility)) in &mono_items { - mono_item.predefine(&cx, linkage, visibility); + mono_item.predefine::(&cx, linkage, visibility); } // ... and now that we have everything pre-defined, fill out those definitions. 
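Note on the predefine/define split above: the first loop only declares each mono item's symbol, and the second loop, which continues immediately after this note, fills in the bodies. Declaring everything up front means any body can reference any other item in the codegen unit regardless of ordering. The sketch below is a self-contained toy of that two-pass idea; `ToyCgu`, `predefine`, and `define` are invented stand-ins, not the real `MonoItem`/`CodegenCx` API.

```
use std::collections::HashMap;

// Toy "codegen unit": symbol name -> Some(body) once defined, None while only declared.
struct ToyCgu {
    symbols: HashMap<String, Option<String>>,
}

impl ToyCgu {
    fn new() -> Self {
        ToyCgu { symbols: HashMap::new() }
    }

    // Pass 1: create a declaration for every item, so later definitions can
    // reference any other item's symbol no matter the order.
    fn predefine(&mut self, name: &str) {
        self.symbols.entry(name.to_string()).or_insert(None);
    }

    // Pass 2: fill in the body; every symbol it might mention already exists.
    fn define(&mut self, name: &str, body: &str) {
        assert!(self.symbols.contains_key(name), "predefine must run first");
        self.symbols.insert(name.to_string(), Some(body.to_string()));
    }
}

fn main() {
    let items = ["foo", "bar"];
    let mut cgu = ToyCgu::new();
    for &item in &items {
        cgu.predefine(item); // first loop: declarations only
    }
    for &item in &items {
        cgu.define(item, "calls foo and bar"); // second loop: bodies may refer to either symbol
    }
    println!("{:?}", cgu.symbols);
}
```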
for &(mono_item, _) in &mono_items { - mono_item.define(&cx); + mono_item.define::(&cx); } // If this codegen unit contains the main function, also create the // wrapper here - maybe_create_entry_wrapper(&cx); + maybe_create_entry_wrapper::(&cx); // Run replace-all-uses-with for statics that need it - for &(old_g, new_g) in cx.statics_to_rauw.borrow().iter() { + for &(old_g, new_g) in cx.statics_to_rauw().borrow().iter() { unsafe { - let bitcast = llvm::LLVMConstPointerCast(new_g, val_ty(old_g)); + let bitcast = llvm::LLVMConstPointerCast(new_g, cx.val_ty(old_g)); llvm::LLVMReplaceAllUsesWith(old_g, bitcast); llvm::LLVMDeleteGlobal(old_g); } @@ -1239,27 +203,16 @@ fn compile_codegen_unit<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, // Create the llvm.used variable // This variable has type [N x i8*] and is stored in the llvm.metadata section - if !cx.used_statics.borrow().is_empty() { - let name = const_cstr!("llvm.used"); - let section = const_cstr!("llvm.metadata"); - let array = C_array(Type::i8(&cx).ptr_to(), &*cx.used_statics.borrow()); - - unsafe { - let g = llvm::LLVMAddGlobal(cx.llmod, - val_ty(array), - name.as_ptr()); - llvm::LLVMSetInitializer(g, array); - llvm::LLVMRustSetLinkage(g, llvm::Linkage::AppendingLinkage); - llvm::LLVMSetSection(g, section.as_ptr()); - } + if !cx.used_statics().borrow().is_empty() { + cx.create_used_variable() } // Finalize debuginfo if cx.sess().opts.debuginfo != DebugInfo::None { - debuginfo::finalize(&cx); + cx.debuginfo_finalize(); } - cx.stats.into_inner() + cx.consume_stats().into_inner() }; (stats, ModuleCodegen { @@ -1270,52 +223,15 @@ fn compile_codegen_unit<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, } } -pub fn provide(providers: &mut Providers) { - providers.collect_and_partition_mono_items = - collect_and_partition_mono_items; - - providers.is_codegened_item = is_codegened_item; - - providers.codegen_unit = |tcx, name| { - let (_, all) = tcx.collect_and_partition_mono_items(LOCAL_CRATE); - all.iter() - .find(|cgu| *cgu.name() == name) - .cloned() - .unwrap_or_else(|| panic!("failed to find cgu with name {:?}", name)) - }; - - provide_extern(providers); -} - -pub fn provide_extern(providers: &mut Providers) { - providers.dllimport_foreign_items = |tcx, krate| { - let module_map = tcx.foreign_modules(krate); - let module_map = module_map.iter() - .map(|lib| (lib.def_id, lib)) - .collect::>(); - - let dllimports = tcx.native_libraries(krate) - .iter() - .filter(|lib| { - if lib.kind != cstore::NativeLibraryKind::NativeUnknown { - return false - } - let cfg = match lib.cfg { - Some(ref cfg) => cfg, - None => return true, - }; - attr::cfg_matches(cfg, &tcx.sess.parse_sess, None) - }) - .filter_map(|lib| lib.foreign_module) - .map(|id| &module_map[&id]) - .flat_map(|module| module.foreign_items.iter().cloned()) - .collect(); - Lrc::new(dllimports) - }; - - providers.is_dllimport_foreign_item = |tcx, def_id| { - tcx.dllimport_foreign_items(def_id.krate).contains(&def_id) +pub fn set_link_section(llval: &Value, attrs: &CodegenFnAttrs) { + let sect = match attrs.link_section { + Some(name) => name, + None => return, }; + unsafe { + let buf = SmallCStr::new(§.as_str()); + llvm::LLVMSetSection(llval, buf.as_ptr()); + } } pub fn linkage_to_llvm(linkage: Linkage) -> llvm::Linkage { @@ -1341,25 +257,3 @@ pub fn visibility_to_llvm(linkage: Visibility) -> llvm::Visibility { Visibility::Protected => llvm::Visibility::Protected, } } - -// FIXME(mw): Anything that is produced via DepGraph::with_task() must implement -// the HashStable trait. 
Normally DepGraph::with_task() calls are -// hidden behind queries, but CGU creation is a special case in two -// ways: (1) it's not a query and (2) CGU are output nodes, so their -// Fingerprints are not actually needed. It remains to be clarified -// how exactly this case will be handled in the red/green system but -// for now we content ourselves with providing a no-op HashStable -// implementation for CGUs. -mod temp_stable_hash_impls { - use rustc_data_structures::stable_hasher::{StableHasherResult, StableHasher, - HashStable}; - use ModuleCodegen; - - impl HashStable for ModuleCodegen { - fn hash_stable(&self, - _: &mut HCX, - _: &mut StableHasher) { - // do nothing - } - } -} diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index 2fe6a0377f..a95ddefc86 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -9,19 +9,28 @@ // except according to those terms. use llvm::{AtomicRmwBinOp, AtomicOrdering, SynchronizationScope, AsmDialect}; -use llvm::{IntPredicate, RealPredicate, False, OperandBundleDef}; -use llvm::{self, BasicBlock}; -use common::*; +use llvm::{self, False, BasicBlock}; +use rustc_codegen_ssa::common::{IntPredicate, TypeKind, RealPredicate}; +use rustc_codegen_ssa::{self, MemFlags}; +use common::Funclet; +use context::CodegenCx; use type_::Type; +use type_of::LayoutLlvmExt; use value::Value; use libc::{c_uint, c_char}; -use rustc::ty::TyCtxt; -use rustc::ty::layout::{Align, Size}; -use rustc::session::{config, Session}; +use rustc::ty::{self, Ty, TyCtxt}; +use rustc::ty::layout::{self, Align, Size, TyLayout}; +use rustc::hir::def_id::DefId; +use rustc::session::config; use rustc_data_structures::small_c_str::SmallCStr; - +use rustc_codegen_ssa::traits::*; +use syntax; +use rustc_codegen_ssa::base::to_immediate; +use rustc_codegen_ssa::mir::operand::{OperandValue, OperandRef}; +use rustc_codegen_ssa::mir::place::PlaceRef; use std::borrow::Cow; -use std::ops::Range; +use std::ffi::CStr; +use std::ops::{Deref, Range}; use std::ptr; // All Builders must have an llfn associated with them @@ -46,17 +55,55 @@ fn noname() -> *const c_char { &CNULL } -bitflags! 
{ - pub struct MemFlags: u8 { - const VOLATILE = 1 << 0; - const NONTEMPORAL = 1 << 1; - const UNALIGNED = 1 << 2; +impl BackendTypes for Builder<'_, 'll, 'tcx> { + type Value = as BackendTypes>::Value; + type BasicBlock = as BackendTypes>::BasicBlock; + type Type = as BackendTypes>::Type; + type Funclet = as BackendTypes>::Funclet; + + type DIScope = as BackendTypes>::DIScope; +} + +impl ty::layout::HasDataLayout for Builder<'_, '_, '_> { + fn data_layout(&self) -> &ty::layout::TargetDataLayout { + self.cx.data_layout() } } -impl Builder<'a, 'll, 'tcx> { - pub fn new_block<'b>(cx: &'a CodegenCx<'ll, 'tcx>, llfn: &'ll Value, name: &'b str) -> Self { - let bx = Builder::with_cx(cx); +impl ty::layout::HasTyCtxt<'tcx> for Builder<'_, '_, 'tcx> { + fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx> { + self.cx.tcx + } +} + +impl ty::layout::LayoutOf for Builder<'_, '_, 'tcx> { + type Ty = Ty<'tcx>; + type TyLayout = TyLayout<'tcx>; + + fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout { + self.cx.layout_of(ty) + } +} + +impl Deref for Builder<'_, 'll, 'tcx> { + type Target = CodegenCx<'ll, 'tcx>; + + fn deref(&self) -> &Self::Target { + self.cx + } +} + +impl HasCodegen<'tcx> for Builder<'_, 'll, 'tcx> { + type CodegenCx = CodegenCx<'ll, 'tcx>; +} + +impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { + fn new_block<'b>( + cx: &'a CodegenCx<'ll, 'tcx>, + llfn: &'ll Value, + name: &'b str + ) -> Self { + let mut bx = Builder::with_cx(cx); let llbb = unsafe { let name = SmallCStr::new(name); llvm::LLVMAppendBasicBlockInContext( @@ -69,7 +116,7 @@ impl Builder<'a, 'll, 'tcx> { bx } - pub fn with_cx(cx: &'a CodegenCx<'ll, 'tcx>) -> Self { + fn with_cx(cx: &'a CodegenCx<'ll, 'tcx>) -> Self { // Create a fresh builder from the crate context. let llbuilder = unsafe { llvm::LLVMCreateBuilderInContext(cx.llcx) @@ -80,85 +127,77 @@ impl Builder<'a, 'll, 'tcx> { } } - pub fn build_sibling_block<'b>(&self, name: &'b str) -> Builder<'a, 'll, 'tcx> { + fn build_sibling_block<'b>(&self, name: &'b str) -> Self { Builder::new_block(self.cx, self.llfn(), name) } - pub fn sess(&self) -> &Session { - self.cx.sess() - } - - pub fn tcx(&self) -> TyCtxt<'a, 'tcx, 'tcx> { - self.cx.tcx - } - - pub fn llfn(&self) -> &'ll Value { + fn llfn(&self) -> &'ll Value { unsafe { llvm::LLVMGetBasicBlockParent(self.llbb()) } } - pub fn llbb(&self) -> &'ll BasicBlock { + fn llbb(&self) -> &'ll BasicBlock { unsafe { llvm::LLVMGetInsertBlock(self.llbuilder) } } fn count_insn(&self, category: &str) { - if self.cx.sess().codegen_stats() { - self.cx.stats.borrow_mut().n_llvm_insns += 1; + if self.sess().codegen_stats() { + self.stats.borrow_mut().n_llvm_insns += 1; } - if self.cx.sess().count_llvm_insns() { - *self.cx.stats - .borrow_mut() - .llvm_insns - .entry(category.to_string()) - .or_insert(0) += 1; + if self.sess().count_llvm_insns() { + *self.stats + .borrow_mut() + .llvm_insns + .entry(category.to_string()) + .or_insert(0) += 1; } } - pub fn set_value_name(&self, value: &'ll Value, name: &str) { + fn set_value_name(&mut self, value: &'ll Value, name: &str) { let cname = SmallCStr::new(name); unsafe { llvm::LLVMSetValueName(value, cname.as_ptr()); } } - pub fn position_at_end(&self, llbb: &'ll BasicBlock) { + fn position_at_end(&mut self, llbb: &'ll BasicBlock) { unsafe { llvm::LLVMPositionBuilderAtEnd(self.llbuilder, llbb); } } - pub fn position_at_start(&self, llbb: &'ll BasicBlock) { + fn position_at_start(&mut self, llbb: &'ll BasicBlock) { unsafe { llvm::LLVMRustPositionBuilderAtStart(self.llbuilder, llbb); } } - pub 
fn ret_void(&self) { + fn ret_void(&mut self) { self.count_insn("retvoid"); unsafe { llvm::LLVMBuildRetVoid(self.llbuilder); } } - pub fn ret(&self, v: &'ll Value) { + fn ret(&mut self, v: &'ll Value) { self.count_insn("ret"); unsafe { llvm::LLVMBuildRet(self.llbuilder, v); } } - pub fn br(&self, dest: &'ll BasicBlock) { + fn br(&mut self, dest: &'ll BasicBlock) { self.count_insn("br"); unsafe { llvm::LLVMBuildBr(self.llbuilder, dest); } } - pub fn cond_br( - &self, + fn cond_br( + &mut self, cond: &'ll Value, then_llbb: &'ll BasicBlock, else_llbb: &'ll BasicBlock, @@ -169,8 +208,8 @@ impl Builder<'a, 'll, 'tcx> { } } - pub fn switch( - &self, + fn switch( + &mut self, v: &'ll Value, else_llbb: &'ll BasicBlock, num_cases: usize, @@ -180,12 +219,14 @@ impl Builder<'a, 'll, 'tcx> { } } - pub fn invoke(&self, - llfn: &'ll Value, - args: &[&'ll Value], - then: &'ll BasicBlock, - catch: &'ll BasicBlock, - bundle: Option<&OperandBundleDef<'ll>>) -> &'ll Value { + fn invoke( + &mut self, + llfn: &'ll Value, + args: &[&'ll Value], + then: &'ll BasicBlock, + catch: &'ll BasicBlock, + funclet: Option<&Funclet<'ll>>, + ) -> &'ll Value { self.count_insn("invoke"); debug!("Invoke {:?} with args ({:?})", @@ -193,7 +234,8 @@ impl Builder<'a, 'll, 'tcx> { args); let args = self.check_call("invoke", llfn, args); - let bundle = bundle.map(|b| &*b.raw); + let bundle = funclet.map(|funclet| funclet.bundle()); + let bundle = bundle.as_ref().map(|b| &*b.raw); unsafe { llvm::LLVMRustBuildInvoke(self.llbuilder, @@ -207,7 +249,7 @@ impl Builder<'a, 'll, 'tcx> { } } - pub fn unreachable(&self) { + fn unreachable(&mut self) { self.count_insn("unreachable"); unsafe { llvm::LLVMBuildUnreachable(self.llbuilder); @@ -215,21 +257,21 @@ impl Builder<'a, 'll, 'tcx> { } /* Arithmetic */ - pub fn add(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn add(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("add"); unsafe { llvm::LLVMBuildAdd(self.llbuilder, lhs, rhs, noname()) } } - pub fn fadd(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn fadd(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("fadd"); unsafe { llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, noname()) } } - pub fn fadd_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn fadd_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("fadd"); unsafe { let instr = llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, noname()); @@ -238,21 +280,21 @@ impl Builder<'a, 'll, 'tcx> { } } - pub fn sub(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn sub(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("sub"); unsafe { llvm::LLVMBuildSub(self.llbuilder, lhs, rhs, noname()) } } - pub fn fsub(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn fsub(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("fsub"); unsafe { llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, noname()) } } - pub fn fsub_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn fsub_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("fsub"); unsafe { let instr = llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, noname()); @@ -261,21 +303,21 @@ impl Builder<'a, 'll, 'tcx> { } } - pub fn mul(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn mul(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("mul"); unsafe { llvm::LLVMBuildMul(self.llbuilder, lhs, 
rhs, noname()) } } - pub fn fmul(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn fmul(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("fmul"); unsafe { llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, noname()) } } - pub fn fmul_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn fmul_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("fmul"); unsafe { let instr = llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, noname()); @@ -285,42 +327,42 @@ impl Builder<'a, 'll, 'tcx> { } - pub fn udiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn udiv(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("udiv"); unsafe { llvm::LLVMBuildUDiv(self.llbuilder, lhs, rhs, noname()) } } - pub fn exactudiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn exactudiv(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("exactudiv"); unsafe { llvm::LLVMBuildExactUDiv(self.llbuilder, lhs, rhs, noname()) } } - pub fn sdiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn sdiv(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("sdiv"); unsafe { llvm::LLVMBuildSDiv(self.llbuilder, lhs, rhs, noname()) } } - pub fn exactsdiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn exactsdiv(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("exactsdiv"); unsafe { llvm::LLVMBuildExactSDiv(self.llbuilder, lhs, rhs, noname()) } } - pub fn fdiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn fdiv(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("fdiv"); unsafe { llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, noname()) } } - pub fn fdiv_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn fdiv_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("fdiv"); unsafe { let instr = llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, noname()); @@ -329,28 +371,28 @@ impl Builder<'a, 'll, 'tcx> { } } - pub fn urem(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn urem(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("urem"); unsafe { llvm::LLVMBuildURem(self.llbuilder, lhs, rhs, noname()) } } - pub fn srem(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn srem(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("srem"); unsafe { llvm::LLVMBuildSRem(self.llbuilder, lhs, rhs, noname()) } } - pub fn frem(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn frem(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("frem"); unsafe { llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, noname()) } } - pub fn frem_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn frem_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("frem"); unsafe { let instr = llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, noname()); @@ -359,78 +401,152 @@ impl Builder<'a, 'll, 'tcx> { } } - pub fn shl(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn shl(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("shl"); unsafe { llvm::LLVMBuildShl(self.llbuilder, lhs, rhs, noname()) } } - pub fn lshr(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn lshr(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("lshr"); unsafe { 
llvm::LLVMBuildLShr(self.llbuilder, lhs, rhs, noname()) } } - pub fn ashr(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn ashr(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("ashr"); unsafe { llvm::LLVMBuildAShr(self.llbuilder, lhs, rhs, noname()) } } - pub fn and(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn and(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("and"); unsafe { llvm::LLVMBuildAnd(self.llbuilder, lhs, rhs, noname()) } } - pub fn or(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn or(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("or"); unsafe { llvm::LLVMBuildOr(self.llbuilder, lhs, rhs, noname()) } } - pub fn xor(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn xor(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("xor"); unsafe { llvm::LLVMBuildXor(self.llbuilder, lhs, rhs, noname()) } } - pub fn neg(&self, v: &'ll Value) -> &'ll Value { + fn neg(&mut self, v: &'ll Value) -> &'ll Value { self.count_insn("neg"); unsafe { llvm::LLVMBuildNeg(self.llbuilder, v, noname()) } } - pub fn fneg(&self, v: &'ll Value) -> &'ll Value { + fn fneg(&mut self, v: &'ll Value) -> &'ll Value { self.count_insn("fneg"); unsafe { llvm::LLVMBuildFNeg(self.llbuilder, v, noname()) } } - pub fn not(&self, v: &'ll Value) -> &'ll Value { + fn not(&mut self, v: &'ll Value) -> &'ll Value { self.count_insn("not"); unsafe { llvm::LLVMBuildNot(self.llbuilder, v, noname()) } } - pub fn alloca(&self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value { - let bx = Builder::with_cx(self.cx); + fn checked_binop( + &mut self, + oop: OverflowOp, + ty: Ty, + lhs: Self::Value, + rhs: Self::Value, + ) -> (Self::Value, Self::Value) { + use syntax::ast::IntTy::*; + use syntax::ast::UintTy::*; + use rustc::ty::{Int, Uint}; + + let new_sty = match ty.sty { + Int(Isize) => Int(self.tcx.sess.target.isize_ty), + Uint(Usize) => Uint(self.tcx.sess.target.usize_ty), + ref t @ Uint(_) | ref t @ Int(_) => t.clone(), + _ => panic!("tried to get overflow intrinsic for op applied to non-int type") + }; + + let name = match oop { + OverflowOp::Add => match new_sty { + Int(I8) => "llvm.sadd.with.overflow.i8", + Int(I16) => "llvm.sadd.with.overflow.i16", + Int(I32) => "llvm.sadd.with.overflow.i32", + Int(I64) => "llvm.sadd.with.overflow.i64", + Int(I128) => "llvm.sadd.with.overflow.i128", + + Uint(U8) => "llvm.uadd.with.overflow.i8", + Uint(U16) => "llvm.uadd.with.overflow.i16", + Uint(U32) => "llvm.uadd.with.overflow.i32", + Uint(U64) => "llvm.uadd.with.overflow.i64", + Uint(U128) => "llvm.uadd.with.overflow.i128", + + _ => unreachable!(), + }, + OverflowOp::Sub => match new_sty { + Int(I8) => "llvm.ssub.with.overflow.i8", + Int(I16) => "llvm.ssub.with.overflow.i16", + Int(I32) => "llvm.ssub.with.overflow.i32", + Int(I64) => "llvm.ssub.with.overflow.i64", + Int(I128) => "llvm.ssub.with.overflow.i128", + + Uint(U8) => "llvm.usub.with.overflow.i8", + Uint(U16) => "llvm.usub.with.overflow.i16", + Uint(U32) => "llvm.usub.with.overflow.i32", + Uint(U64) => "llvm.usub.with.overflow.i64", + Uint(U128) => "llvm.usub.with.overflow.i128", + + _ => unreachable!(), + }, + OverflowOp::Mul => match new_sty { + Int(I8) => "llvm.smul.with.overflow.i8", + Int(I16) => "llvm.smul.with.overflow.i16", + Int(I32) => "llvm.smul.with.overflow.i32", + Int(I64) => "llvm.smul.with.overflow.i64", + Int(I128) => "llvm.smul.with.overflow.i128", + + Uint(U8) => "llvm.umul.with.overflow.i8", 
+ Uint(U16) => "llvm.umul.with.overflow.i16", + Uint(U32) => "llvm.umul.with.overflow.i32", + Uint(U64) => "llvm.umul.with.overflow.i64", + Uint(U128) => "llvm.umul.with.overflow.i128", + + _ => unreachable!(), + }, + }; + + let intrinsic = self.get_intrinsic(&name); + let res = self.call(intrinsic, &[lhs, rhs], None); + ( + self.extract_value(res, 0), + self.extract_value(res, 1), + ) + } + + fn alloca(&mut self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value { + let mut bx = Builder::with_cx(self.cx); bx.position_at_start(unsafe { llvm::LLVMGetFirstBasicBlock(self.llfn()) }); bx.dynamic_alloca(ty, name, align) } - pub fn dynamic_alloca(&self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value { + fn dynamic_alloca(&mut self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value { self.count_insn("alloca"); unsafe { let alloca = if name.is_empty() { @@ -440,12 +556,12 @@ impl Builder<'a, 'll, 'tcx> { llvm::LLVMBuildAlloca(self.llbuilder, ty, name.as_ptr()) }; - llvm::LLVMSetAlignment(alloca, align.abi() as c_uint); + llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint); alloca } } - pub fn array_alloca(&self, + fn array_alloca(&mut self, ty: &'ll Type, len: &'ll Value, name: &str, @@ -459,21 +575,21 @@ impl Builder<'a, 'll, 'tcx> { llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len, name.as_ptr()) }; - llvm::LLVMSetAlignment(alloca, align.abi() as c_uint); + llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint); alloca } } - pub fn load(&self, ptr: &'ll Value, align: Align) -> &'ll Value { + fn load(&mut self, ptr: &'ll Value, align: Align) -> &'ll Value { self.count_insn("load"); unsafe { let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname()); - llvm::LLVMSetAlignment(load, align.abi() as c_uint); + llvm::LLVMSetAlignment(load, align.bytes() as c_uint); load } } - pub fn volatile_load(&self, ptr: &'ll Value) -> &'ll Value { + fn volatile_load(&mut self, ptr: &'ll Value) -> &'ll Value { self.count_insn("load.volatile"); unsafe { let insn = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname()); @@ -482,20 +598,105 @@ impl Builder<'a, 'll, 'tcx> { } } - pub fn atomic_load(&self, ptr: &'ll Value, order: AtomicOrdering, align: Align) -> &'ll Value { + fn atomic_load( + &mut self, + ptr: &'ll Value, + order: rustc_codegen_ssa::common::AtomicOrdering, + size: Size, + ) -> &'ll Value { self.count_insn("load.atomic"); unsafe { - let load = llvm::LLVMRustBuildAtomicLoad(self.llbuilder, ptr, noname(), order); - // FIXME(eddyb) Isn't it UB to use `pref` instead of `abi` here? - // However, 64-bit atomic loads on `i686-apple-darwin` appear to - // require `___atomic_load` with ABI-alignment, so it's staying. - llvm::LLVMSetAlignment(load, align.pref() as c_uint); + let load = llvm::LLVMRustBuildAtomicLoad( + self.llbuilder, + ptr, + noname(), + AtomicOrdering::from_generic(order), + ); + // LLVM requires the alignment of atomic loads to be at least the size of the type. 
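The `llvm.*.with.overflow.*` intrinsics selected in `checked_binop` above all return a pair of values: the wrapped result and an overflow flag, which the builder then splits apart with `extract_value(res, 0)` and `extract_value(res, 1)`. Purely as an illustration of that contract (not of the LLVM API itself), Rust's own `overflowing_add` has the same shape:

```
// `overflowing_add` mirrors the (wrapped result, overflow flag) pair produced
// by llvm.uadd.with.overflow.i8 and friends.
fn checked_add_like_the_intrinsic(lhs: u8, rhs: u8) -> (u8, bool) {
    lhs.overflowing_add(rhs)
}

fn main() {
    assert_eq!(checked_add_like_the_intrinsic(1, 2), (3, false));
    assert_eq!(checked_add_like_the_intrinsic(200, 100), (44, true)); // 300 wraps to 44
}
```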
+ llvm::LLVMSetAlignment(load, size.bytes() as c_uint); load } } + fn load_operand( + &mut self, + place: PlaceRef<'tcx, &'ll Value> + ) -> OperandRef<'tcx, &'ll Value> { + debug!("PlaceRef::load: {:?}", place); - pub fn range_metadata(&self, load: &'ll Value, range: Range) { + assert_eq!(place.llextra.is_some(), place.layout.is_unsized()); + + if place.layout.is_zst() { + return OperandRef::new_zst(self.cx(), place.layout); + } + + fn scalar_load_metadata<'a, 'll, 'tcx>( + bx: &mut Builder<'a, 'll, 'tcx>, + load: &'ll Value, + scalar: &layout::Scalar + ) { + let vr = scalar.valid_range.clone(); + match scalar.value { + layout::Int(..) => { + let range = scalar.valid_range_exclusive(bx); + if range.start != range.end { + bx.range_metadata(load, range); + } + } + layout::Pointer if vr.start() < vr.end() && !vr.contains(&0) => { + bx.nonnull_metadata(load); + } + _ => {} + } + } + + let val = if let Some(llextra) = place.llextra { + OperandValue::Ref(place.llval, Some(llextra), place.align) + } else if place.layout.is_llvm_immediate() { + let mut const_llval = None; + unsafe { + if let Some(global) = llvm::LLVMIsAGlobalVariable(place.llval) { + if llvm::LLVMIsGlobalConstant(global) == llvm::True { + const_llval = llvm::LLVMGetInitializer(global); + } + } + } + let llval = const_llval.unwrap_or_else(|| { + let load = self.load(place.llval, place.align); + if let layout::Abi::Scalar(ref scalar) = place.layout.abi { + scalar_load_metadata(self, load, scalar); + } + load + }); + OperandValue::Immediate(to_immediate(self, llval, place.layout)) + } else if let layout::Abi::ScalarPair(ref a, ref b) = place.layout.abi { + let b_offset = a.value.size(self).align_to(b.value.align(self).abi); + + let mut load = |i, scalar: &layout::Scalar, align| { + let llptr = self.struct_gep(place.llval, i as u64); + let load = self.load(llptr, align); + scalar_load_metadata(self, load, scalar); + if scalar.is_bool() { + self.trunc(load, self.type_i1()) + } else { + load + } + }; + + OperandValue::Pair( + load(0, a, place.align), + load(1, b, place.align.restrict_for_offset(b_offset)), + ) + } else { + OperandValue::Ref(place.llval, None, place.align) + }; + + OperandRef { val, layout: place.layout } + } + + + + fn range_metadata(&mut self, load: &'ll Value, range: Range) { if self.sess().target.target.arch == "amdgpu" { // amdgpu/LLVM does something weird and thinks a i64 value is // split into a v2i32, halving the bitwidth LLVM expects, @@ -505,10 +706,10 @@ impl Builder<'a, 'll, 'tcx> { } unsafe { - let llty = val_ty(load); + let llty = self.cx.val_ty(load); let v = [ - C_uint_big(llty, range.start), - C_uint_big(llty, range.end) + self.cx.const_uint_big(llty, range.start), + self.cx.const_uint_big(llty, range.end) ]; llvm::LLVMSetMetadata(load, llvm::MD_range as c_uint, @@ -518,19 +719,19 @@ impl Builder<'a, 'll, 'tcx> { } } - pub fn nonnull_metadata(&self, load: &'ll Value) { + fn nonnull_metadata(&mut self, load: &'ll Value) { unsafe { llvm::LLVMSetMetadata(load, llvm::MD_nonnull as c_uint, llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0)); } } - pub fn store(&self, val: &'ll Value, ptr: &'ll Value, align: Align) -> &'ll Value { + fn store(&mut self, val: &'ll Value, ptr: &'ll Value, align: Align) -> &'ll Value { self.store_with_flags(val, ptr, align, MemFlags::empty()) } - pub fn store_with_flags( - &self, + fn store_with_flags( + &mut self, val: &'ll Value, ptr: &'ll Value, align: Align, @@ -544,7 +745,7 @@ impl Builder<'a, 'll, 'tcx> { let align = if flags.contains(MemFlags::UNALIGNED) { 1 } else { - 
align.abi() as c_uint + align.bytes() as c_uint }; llvm::LLVMSetAlignment(store, align); if flags.contains(MemFlags::VOLATILE) { @@ -555,7 +756,7 @@ impl Builder<'a, 'll, 'tcx> { // *always* point to a metadata value of the integer 1. // // [1]: http://llvm.org/docs/LangRef.html#store-instruction - let one = C_i32(self.cx, 1); + let one = self.cx.const_i32(1); let node = llvm::LLVMMDNodeInContext(self.cx.llcx, &one, 1); llvm::LLVMSetMetadata(store, llvm::MD_nontemporal as c_uint, node); } @@ -563,20 +764,24 @@ impl Builder<'a, 'll, 'tcx> { } } - pub fn atomic_store(&self, val: &'ll Value, ptr: &'ll Value, - order: AtomicOrdering, align: Align) { + fn atomic_store(&mut self, val: &'ll Value, ptr: &'ll Value, + order: rustc_codegen_ssa::common::AtomicOrdering, size: Size) { debug!("Store {:?} -> {:?}", val, ptr); self.count_insn("store.atomic"); let ptr = self.check_store(val, ptr); unsafe { - let store = llvm::LLVMRustBuildAtomicStore(self.llbuilder, val, ptr, order); - // FIXME(eddyb) Isn't it UB to use `pref` instead of `abi` here? - // Also see `atomic_load` for more context. - llvm::LLVMSetAlignment(store, align.pref() as c_uint); + let store = llvm::LLVMRustBuildAtomicStore( + self.llbuilder, + val, + ptr, + AtomicOrdering::from_generic(order), + ); + // LLVM requires the alignment of atomic stores to be at least the size of the type. + llvm::LLVMSetAlignment(store, size.bytes() as c_uint); } } - pub fn gep(&self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value { + fn gep(&mut self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value { self.count_insn("gep"); unsafe { llvm::LLVMBuildGEP(self.llbuilder, ptr, indices.as_ptr(), @@ -584,7 +789,7 @@ impl Builder<'a, 'll, 'tcx> { } } - pub fn inbounds_gep(&self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value { + fn inbounds_gep(&mut self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value { self.count_insn("inboundsgep"); unsafe { llvm::LLVMBuildInBoundsGEP( @@ -592,122 +797,109 @@ impl Builder<'a, 'll, 'tcx> { } } - pub fn struct_gep(&self, ptr: &'ll Value, idx: u64) -> &'ll Value { - self.count_insn("structgep"); - assert_eq!(idx as c_uint as u64, idx); - unsafe { - llvm::LLVMBuildStructGEP(self.llbuilder, ptr, idx as c_uint, noname()) - } - } - /* Casts */ - pub fn trunc(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn trunc(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("trunc"); unsafe { llvm::LLVMBuildTrunc(self.llbuilder, val, dest_ty, noname()) } } - pub fn zext(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { - self.count_insn("zext"); - unsafe { - llvm::LLVMBuildZExt(self.llbuilder, val, dest_ty, noname()) - } - } - - pub fn sext(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn sext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("sext"); unsafe { llvm::LLVMBuildSExt(self.llbuilder, val, dest_ty, noname()) } } - pub fn fptoui(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn fptoui(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("fptoui"); unsafe { llvm::LLVMBuildFPToUI(self.llbuilder, val, dest_ty, noname()) } } - pub fn fptosi(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn fptosi(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("fptosi"); unsafe { llvm::LLVMBuildFPToSI(self.llbuilder, val, dest_ty,noname()) } } - pub fn uitofp(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn uitofp(&mut self, 
val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("uitofp"); unsafe { llvm::LLVMBuildUIToFP(self.llbuilder, val, dest_ty, noname()) } } - pub fn sitofp(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn sitofp(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("sitofp"); unsafe { llvm::LLVMBuildSIToFP(self.llbuilder, val, dest_ty, noname()) } } - pub fn fptrunc(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn fptrunc(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("fptrunc"); unsafe { llvm::LLVMBuildFPTrunc(self.llbuilder, val, dest_ty, noname()) } } - pub fn fpext(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn fpext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("fpext"); unsafe { llvm::LLVMBuildFPExt(self.llbuilder, val, dest_ty, noname()) } } - pub fn ptrtoint(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn ptrtoint(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("ptrtoint"); unsafe { llvm::LLVMBuildPtrToInt(self.llbuilder, val, dest_ty, noname()) } } - pub fn inttoptr(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn inttoptr(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("inttoptr"); unsafe { llvm::LLVMBuildIntToPtr(self.llbuilder, val, dest_ty, noname()) } } - pub fn bitcast(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn bitcast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("bitcast"); unsafe { llvm::LLVMBuildBitCast(self.llbuilder, val, dest_ty, noname()) } } - pub fn pointercast(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { - self.count_insn("pointercast"); - unsafe { - llvm::LLVMBuildPointerCast(self.llbuilder, val, dest_ty, noname()) - } - } - pub fn intcast(&self, val: &'ll Value, dest_ty: &'ll Type, is_signed: bool) -> &'ll Value { + fn intcast(&mut self, val: &'ll Value, dest_ty: &'ll Type, is_signed: bool) -> &'ll Value { self.count_insn("intcast"); unsafe { llvm::LLVMRustBuildIntCast(self.llbuilder, val, dest_ty, is_signed) } } + fn pointercast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + self.count_insn("pointercast"); + unsafe { + llvm::LLVMBuildPointerCast(self.llbuilder, val, dest_ty, noname()) + } + } + /* Comparisons */ - pub fn icmp(&self, op: IntPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn icmp(&mut self, op: IntPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("icmp"); + let op = llvm::IntPredicate::from_generic(op); unsafe { llvm::LLVMBuildICmp(self.llbuilder, op as c_uint, lhs, rhs, noname()) } } - pub fn fcmp(&self, op: RealPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn fcmp(&mut self, op: RealPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("fcmp"); unsafe { llvm::LLVMBuildFCmp(self.llbuilder, op as c_uint, lhs, rhs, noname()) @@ -715,14 +907,14 @@ impl Builder<'a, 'll, 'tcx> { } /* Miscellaneous instructions */ - pub fn empty_phi(&self, ty: &'ll Type) -> &'ll Value { + fn empty_phi(&mut self, ty: &'ll Type) -> &'ll Value { self.count_insn("emptyphi"); unsafe { llvm::LLVMBuildPhi(self.llbuilder, ty, noname()) } } - pub fn phi(&self, ty: &'ll Type, vals: &[&'ll Value], bbs: &[&'ll BasicBlock]) -> &'ll Value { + fn phi(&mut self, ty: &'ll Type, vals: &[&'ll Value], bbs: &[&'ll BasicBlock]) -> &'ll Value { assert_eq!(vals.len(), 
bbs.len()); let phi = self.empty_phi(ty); self.count_insn("addincoming"); @@ -734,10 +926,10 @@ impl Builder<'a, 'll, 'tcx> { } } - pub fn inline_asm_call(&self, asm: *const c_char, cons: *const c_char, - inputs: &[&'ll Value], output: &'ll Type, - volatile: bool, alignstack: bool, - dia: AsmDialect) -> Option<&'ll Value> { + fn inline_asm_call(&mut self, asm: &CStr, cons: &CStr, + inputs: &[&'ll Value], output: &'ll Type, + volatile: bool, alignstack: bool, + dia: syntax::ast::AsmDialect) -> Option<&'ll Value> { self.count_insn("inlineasm"); let volatile = if volatile { llvm::True } @@ -747,18 +939,24 @@ impl Builder<'a, 'll, 'tcx> { let argtys = inputs.iter().map(|v| { debug!("Asm Input Type: {:?}", *v); - val_ty(*v) + self.cx.val_ty(*v) }).collect::>(); debug!("Asm Output Type: {:?}", output); - let fty = Type::func(&argtys[..], output); + let fty = self.type_func(&argtys[..], output); unsafe { // Ask LLVM to verify that the constraints are well-formed. - let constraints_ok = llvm::LLVMRustInlineAsmVerify(fty, cons); + let constraints_ok = llvm::LLVMRustInlineAsmVerify(fty, cons.as_ptr()); debug!("Constraint verification result: {:?}", constraints_ok); if constraints_ok { let v = llvm::LLVMRustInlineAsm( - fty, asm, cons, volatile, alignstack, dia); + fty, + asm.as_ptr(), + cons.as_ptr(), + volatile, + alignstack, + AsmDialect::from_generic(dia), + ); Some(self.call(v, inputs, None)) } else { // LLVM has detected an issue with our constraints, bail out @@ -767,31 +965,71 @@ impl Builder<'a, 'll, 'tcx> { } } - pub fn call(&self, llfn: &'ll Value, args: &[&'ll Value], - bundle: Option<&OperandBundleDef<'ll>>) -> &'ll Value { - self.count_insn("call"); - - debug!("Call {:?} with args ({:?})", - llfn, - args); - - let args = self.check_call("call", llfn, args); - let bundle = bundle.map(|b| &*b.raw); - + fn memcpy(&mut self, dst: &'ll Value, dst_align: Align, + src: &'ll Value, src_align: Align, + size: &'ll Value, flags: MemFlags) { + if flags.contains(MemFlags::NONTEMPORAL) { + // HACK(nox): This is inefficient but there is no nontemporal memcpy. + let val = self.load(src, src_align); + let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val))); + self.store_with_flags(val, ptr, dst_align, flags); + return; + } + let size = self.intcast(size, self.type_isize(), false); + let is_volatile = flags.contains(MemFlags::VOLATILE); + let dst = self.pointercast(dst, self.type_i8p()); + let src = self.pointercast(src, self.type_i8p()); unsafe { - llvm::LLVMRustBuildCall(self.llbuilder, llfn, args.as_ptr(), - args.len() as c_uint, bundle, noname()) + llvm::LLVMRustBuildMemCpy(self.llbuilder, dst, dst_align.bytes() as c_uint, + src, src_align.bytes() as c_uint, size, is_volatile); } } - pub fn minnum(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn memmove(&mut self, dst: &'ll Value, dst_align: Align, + src: &'ll Value, src_align: Align, + size: &'ll Value, flags: MemFlags) { + if flags.contains(MemFlags::NONTEMPORAL) { + // HACK(nox): This is inefficient but there is no nontemporal memmove. 
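The `MemFlags::NONTEMPORAL` branch in `memcpy` above (and in `memmove`, whose body continues below) exists because LLVM offers no nontemporal memcpy or memmove intrinsic, so the builder falls back to an explicit load followed by a store carrying the nontemporal flag. A minimal stand-in for that decision, with an invented flag constant rather than the real `MemFlags` type:

```
const NONTEMPORAL: u8 = 1 << 1; // stand-in for MemFlags::NONTEMPORAL

fn copy_bytes(dst: &mut [u8], src: &[u8], flags: u8) {
    assert_eq!(dst.len(), src.len());
    if flags & NONTEMPORAL != 0 {
        // Fallback path: copy explicitly, standing in for the single load plus
        // store_with_flags(.., NONTEMPORAL) that the real builder emits.
        for (d, s) in dst.iter_mut().zip(src) {
            *d = *s;
        }
        return;
    }
    // Normal path: corresponds to emitting llvm.memcpy / llvm.memmove.
    dst.copy_from_slice(src);
}

fn main() {
    let src = [1u8, 2, 3, 4];
    let mut dst = [0u8; 4];
    copy_bytes(&mut dst, &src, NONTEMPORAL);
    assert_eq!(dst, src);
}
```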
+ let val = self.load(src, src_align); + let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val))); + self.store_with_flags(val, ptr, dst_align, flags); + return; + } + let size = self.intcast(size, self.type_isize(), false); + let is_volatile = flags.contains(MemFlags::VOLATILE); + let dst = self.pointercast(dst, self.type_i8p()); + let src = self.pointercast(src, self.type_i8p()); + unsafe { + llvm::LLVMRustBuildMemMove(self.llbuilder, dst, dst_align.bytes() as c_uint, + src, src_align.bytes() as c_uint, size, is_volatile); + } + } + + fn memset( + &mut self, + ptr: &'ll Value, + fill_byte: &'ll Value, + size: &'ll Value, + align: Align, + flags: MemFlags, + ) { + let ptr_width = &self.sess().target.target.target_pointer_width; + let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width); + let llintrinsicfn = self.get_intrinsic(&intrinsic_key); + let ptr = self.pointercast(ptr, self.type_i8p()); + let align = self.const_u32(align.bytes() as u32); + let volatile = self.const_bool(flags.contains(MemFlags::VOLATILE)); + self.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None); + } + + fn minnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("minnum"); unsafe { let instr = llvm::LLVMRustBuildMinNum(self.llbuilder, lhs, rhs); instr.expect("LLVMRustBuildMinNum is not available in LLVM version < 6.0") } } - pub fn maxnum(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn maxnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("maxnum"); unsafe { let instr = llvm::LLVMRustBuildMaxNum(self.llbuilder, lhs, rhs); @@ -799,8 +1037,8 @@ impl Builder<'a, 'll, 'tcx> { } } - pub fn select( - &self, cond: &'ll Value, + fn select( + &mut self, cond: &'ll Value, then_val: &'ll Value, else_val: &'ll Value, ) -> &'ll Value { @@ -811,22 +1049,22 @@ impl Builder<'a, 'll, 'tcx> { } #[allow(dead_code)] - pub fn va_arg(&self, list: &'ll Value, ty: &'ll Type) -> &'ll Value { + fn va_arg(&mut self, list: &'ll Value, ty: &'ll Type) -> &'ll Value { self.count_insn("vaarg"); unsafe { llvm::LLVMBuildVAArg(self.llbuilder, list, ty, noname()) } } - pub fn extract_element(&self, vec: &'ll Value, idx: &'ll Value) -> &'ll Value { + fn extract_element(&mut self, vec: &'ll Value, idx: &'ll Value) -> &'ll Value { self.count_insn("extractelement"); unsafe { llvm::LLVMBuildExtractElement(self.llbuilder, vec, idx, noname()) } } - pub fn insert_element( - &self, vec: &'ll Value, + fn insert_element( + &mut self, vec: &'ll Value, elt: &'ll Value, idx: &'ll Value, ) -> &'ll Value { @@ -836,130 +1074,99 @@ impl Builder<'a, 'll, 'tcx> { } } - pub fn shuffle_vector(&self, v1: &'ll Value, v2: &'ll Value, mask: &'ll Value) -> &'ll Value { + fn shuffle_vector(&mut self, v1: &'ll Value, v2: &'ll Value, mask: &'ll Value) -> &'ll Value { self.count_insn("shufflevector"); unsafe { llvm::LLVMBuildShuffleVector(self.llbuilder, v1, v2, mask, noname()) } } - pub fn vector_splat(&self, num_elts: usize, elt: &'ll Value) -> &'ll Value { + fn vector_splat(&mut self, num_elts: usize, elt: &'ll Value) -> &'ll Value { unsafe { - let elt_ty = val_ty(elt); - let undef = llvm::LLVMGetUndef(Type::vector(elt_ty, num_elts as u64)); - let vec = self.insert_element(undef, elt, C_i32(self.cx, 0)); - let vec_i32_ty = Type::vector(Type::i32(self.cx), num_elts as u64); - self.shuffle_vector(vec, undef, C_null(vec_i32_ty)) + let elt_ty = self.cx.val_ty(elt); + let undef = llvm::LLVMGetUndef(self.type_vector(elt_ty, num_elts as u64)); + let vec = 
self.insert_element(undef, elt, self.cx.const_i32(0)); + let vec_i32_ty = self.type_vector(self.type_i32(), num_elts as u64); + self.shuffle_vector(vec, undef, self.const_null(vec_i32_ty)) } } - pub fn vector_reduce_fadd_fast(&self, acc: &'ll Value, src: &'ll Value) -> &'ll Value { + fn vector_reduce_fadd_fast(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.fadd_fast"); unsafe { // FIXME: add a non-fast math version once // https://bugs.llvm.org/show_bug.cgi?id=36732 // is fixed. - let instr = llvm::LLVMRustBuildVectorReduceFAdd(self.llbuilder, acc, src) - .expect("LLVMRustBuildVectorReduceFAdd is not available in LLVM version < 5.0"); + let instr = llvm::LLVMRustBuildVectorReduceFAdd(self.llbuilder, acc, src); llvm::LLVMRustSetHasUnsafeAlgebra(instr); instr } } - pub fn vector_reduce_fmul_fast(&self, acc: &'ll Value, src: &'ll Value) -> &'ll Value { + fn vector_reduce_fmul_fast(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.fmul_fast"); unsafe { // FIXME: add a non-fast math version once // https://bugs.llvm.org/show_bug.cgi?id=36732 // is fixed. - let instr = llvm::LLVMRustBuildVectorReduceFMul(self.llbuilder, acc, src) - .expect("LLVMRustBuildVectorReduceFMul is not available in LLVM version < 5.0"); + let instr = llvm::LLVMRustBuildVectorReduceFMul(self.llbuilder, acc, src); llvm::LLVMRustSetHasUnsafeAlgebra(instr); instr } } - pub fn vector_reduce_add(&self, src: &'ll Value) -> &'ll Value { + fn vector_reduce_add(&mut self, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.add"); - unsafe { - let instr = llvm::LLVMRustBuildVectorReduceAdd(self.llbuilder, src); - instr.expect("LLVMRustBuildVectorReduceAdd is not available in LLVM version < 5.0") - } + unsafe { llvm::LLVMRustBuildVectorReduceAdd(self.llbuilder, src) } } - pub fn vector_reduce_mul(&self, src: &'ll Value) -> &'ll Value { + fn vector_reduce_mul(&mut self, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.mul"); - unsafe { - let instr = llvm::LLVMRustBuildVectorReduceMul(self.llbuilder, src); - instr.expect("LLVMRustBuildVectorReduceMul is not available in LLVM version < 5.0") - } + unsafe { llvm::LLVMRustBuildVectorReduceMul(self.llbuilder, src) } } - pub fn vector_reduce_and(&self, src: &'ll Value) -> &'ll Value { + fn vector_reduce_and(&mut self, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.and"); - unsafe { - let instr = llvm::LLVMRustBuildVectorReduceAnd(self.llbuilder, src); - instr.expect("LLVMRustBuildVectorReduceAnd is not available in LLVM version < 5.0") - } + unsafe { llvm::LLVMRustBuildVectorReduceAnd(self.llbuilder, src) } } - pub fn vector_reduce_or(&self, src: &'ll Value) -> &'ll Value { + fn vector_reduce_or(&mut self, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.or"); - unsafe { - let instr = llvm::LLVMRustBuildVectorReduceOr(self.llbuilder, src); - instr.expect("LLVMRustBuildVectorReduceOr is not available in LLVM version < 5.0") - } + unsafe { llvm::LLVMRustBuildVectorReduceOr(self.llbuilder, src) } } - pub fn vector_reduce_xor(&self, src: &'ll Value) -> &'ll Value { + fn vector_reduce_xor(&mut self, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.xor"); - unsafe { - let instr = llvm::LLVMRustBuildVectorReduceXor(self.llbuilder, src); - instr.expect("LLVMRustBuildVectorReduceXor is not available in LLVM version < 5.0") - } + unsafe { llvm::LLVMRustBuildVectorReduceXor(self.llbuilder, src) } } - pub fn vector_reduce_fmin(&self, src: &'ll 
Value) -> &'ll Value { + fn vector_reduce_fmin(&mut self, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.fmin"); - unsafe { - let instr = llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ false); - instr.expect("LLVMRustBuildVectorReduceFMin is not available in LLVM version < 5.0") - } + unsafe { llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ false) } } - pub fn vector_reduce_fmax(&self, src: &'ll Value) -> &'ll Value { + fn vector_reduce_fmax(&mut self, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.fmax"); - unsafe { - let instr = llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ false); - instr.expect("LLVMRustBuildVectorReduceFMax is not available in LLVM version < 5.0") - } + unsafe { llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ false) } } - pub fn vector_reduce_fmin_fast(&self, src: &'ll Value) -> &'ll Value { + fn vector_reduce_fmin_fast(&mut self, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.fmin_fast"); unsafe { - let instr = llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ true) - .expect("LLVMRustBuildVectorReduceFMin is not available in LLVM version < 5.0"); + let instr = llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ true); llvm::LLVMRustSetHasUnsafeAlgebra(instr); instr } } - pub fn vector_reduce_fmax_fast(&self, src: &'ll Value) -> &'ll Value { + fn vector_reduce_fmax_fast(&mut self, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.fmax_fast"); unsafe { - let instr = llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ true) - .expect("LLVMRustBuildVectorReduceFMax is not available in LLVM version < 5.0"); + let instr = llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ true); llvm::LLVMRustSetHasUnsafeAlgebra(instr); instr } } - pub fn vector_reduce_min(&self, src: &'ll Value, is_signed: bool) -> &'ll Value { + fn vector_reduce_min(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value { self.count_insn("vector.reduce.min"); - unsafe { - let instr = llvm::LLVMRustBuildVectorReduceMin(self.llbuilder, src, is_signed); - instr.expect("LLVMRustBuildVectorReduceMin is not available in LLVM version < 5.0") - } + unsafe { llvm::LLVMRustBuildVectorReduceMin(self.llbuilder, src, is_signed) } } - pub fn vector_reduce_max(&self, src: &'ll Value, is_signed: bool) -> &'ll Value { + fn vector_reduce_max(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value { self.count_insn("vector.reduce.max"); - unsafe { - let instr = llvm::LLVMRustBuildVectorReduceMax(self.llbuilder, src, is_signed); - instr.expect("LLVMRustBuildVectorReduceMax is not available in LLVM version < 5.0") - } + unsafe { llvm::LLVMRustBuildVectorReduceMax(self.llbuilder, src, is_signed) } } - pub fn extract_value(&self, agg_val: &'ll Value, idx: u64) -> &'ll Value { + fn extract_value(&mut self, agg_val: &'ll Value, idx: u64) -> &'ll Value { self.count_insn("extractvalue"); assert_eq!(idx as c_uint as u64, idx); unsafe { @@ -967,7 +1174,7 @@ impl Builder<'a, 'll, 'tcx> { } } - pub fn insert_value(&self, agg_val: &'ll Value, elt: &'ll Value, + fn insert_value(&mut self, agg_val: &'ll Value, elt: &'ll Value, idx: u64) -> &'ll Value { self.count_insn("insertvalue"); assert_eq!(idx as c_uint as u64, idx); @@ -977,7 +1184,7 @@ impl Builder<'a, 'll, 'tcx> { } } - pub fn landing_pad(&self, ty: &'ll Type, pers_fn: &'ll Value, + fn landing_pad(&mut self, ty: &'ll Type, pers_fn: &'ll Value, 
num_clauses: usize) -> &'ll Value { self.count_insn("landingpad"); unsafe { @@ -986,29 +1193,29 @@ impl Builder<'a, 'll, 'tcx> { } } - pub fn add_clause(&self, landing_pad: &'ll Value, clause: &'ll Value) { + fn add_clause(&mut self, landing_pad: &'ll Value, clause: &'ll Value) { unsafe { llvm::LLVMAddClause(landing_pad, clause); } } - pub fn set_cleanup(&self, landing_pad: &'ll Value) { + fn set_cleanup(&mut self, landing_pad: &'ll Value) { self.count_insn("setcleanup"); unsafe { llvm::LLVMSetCleanup(landing_pad, llvm::True); } } - pub fn resume(&self, exn: &'ll Value) -> &'ll Value { + fn resume(&mut self, exn: &'ll Value) -> &'ll Value { self.count_insn("resume"); unsafe { llvm::LLVMBuildResume(self.llbuilder, exn) } } - pub fn cleanup_pad(&self, + fn cleanup_pad(&mut self, parent: Option<&'ll Value>, - args: &[&'ll Value]) -> &'ll Value { + args: &[&'ll Value]) -> Funclet<'ll> { self.count_insn("cleanuppad"); let name = const_cstr!("cleanuppad"); let ret = unsafe { @@ -1018,23 +1225,23 @@ impl Builder<'a, 'll, 'tcx> { args.as_ptr(), name.as_ptr()) }; - ret.expect("LLVM does not have support for cleanuppad") + Funclet::new(ret.expect("LLVM does not have support for cleanuppad")) } - pub fn cleanup_ret( - &self, cleanup: &'ll Value, + fn cleanup_ret( + &mut self, funclet: &Funclet<'ll>, unwind: Option<&'ll BasicBlock>, ) -> &'ll Value { self.count_insn("cleanupret"); let ret = unsafe { - llvm::LLVMRustBuildCleanupRet(self.llbuilder, cleanup, unwind) + llvm::LLVMRustBuildCleanupRet(self.llbuilder, funclet.cleanuppad(), unwind) }; ret.expect("LLVM does not have support for cleanupret") } - pub fn catch_pad(&self, + fn catch_pad(&mut self, parent: &'ll Value, - args: &[&'ll Value]) -> &'ll Value { + args: &[&'ll Value]) -> Funclet<'ll> { self.count_insn("catchpad"); let name = const_cstr!("catchpad"); let ret = unsafe { @@ -1042,19 +1249,19 @@ impl Builder<'a, 'll, 'tcx> { args.len() as c_uint, args.as_ptr(), name.as_ptr()) }; - ret.expect("LLVM does not have support for catchpad") + Funclet::new(ret.expect("LLVM does not have support for catchpad")) } - pub fn catch_ret(&self, pad: &'ll Value, unwind: &'ll BasicBlock) -> &'ll Value { + fn catch_ret(&mut self, funclet: &Funclet<'ll>, unwind: &'ll BasicBlock) -> &'ll Value { self.count_insn("catchret"); let ret = unsafe { - llvm::LLVMRustBuildCatchRet(self.llbuilder, pad, unwind) + llvm::LLVMRustBuildCatchRet(self.llbuilder, funclet.cleanuppad(), unwind) }; ret.expect("LLVM does not have support for catchret") } - pub fn catch_switch( - &self, + fn catch_switch( + &mut self, parent: Option<&'ll Value>, unwind: Option<&'ll BasicBlock>, num_handlers: usize, @@ -1069,80 +1276,101 @@ impl Builder<'a, 'll, 'tcx> { ret.expect("LLVM does not have support for catchswitch") } - pub fn add_handler(&self, catch_switch: &'ll Value, handler: &'ll BasicBlock) { + fn add_handler(&mut self, catch_switch: &'ll Value, handler: &'ll BasicBlock) { unsafe { llvm::LLVMRustAddHandler(catch_switch, handler); } } - pub fn set_personality_fn(&self, personality: &'ll Value) { + fn set_personality_fn(&mut self, personality: &'ll Value) { unsafe { llvm::LLVMSetPersonalityFn(self.llfn(), personality); } } // Atomic Operations - pub fn atomic_cmpxchg( - &self, + fn atomic_cmpxchg( + &mut self, dst: &'ll Value, cmp: &'ll Value, src: &'ll Value, - order: AtomicOrdering, - failure_order: AtomicOrdering, - weak: llvm::Bool, + order: rustc_codegen_ssa::common::AtomicOrdering, + failure_order: rustc_codegen_ssa::common::AtomicOrdering, + weak: bool, ) -> &'ll Value { + let weak 
= if weak { llvm::True } else { llvm::False }; unsafe { - llvm::LLVMRustBuildAtomicCmpXchg(self.llbuilder, dst, cmp, src, - order, failure_order, weak) + llvm::LLVMRustBuildAtomicCmpXchg( + self.llbuilder, + dst, + cmp, + src, + AtomicOrdering::from_generic(order), + AtomicOrdering::from_generic(failure_order), + weak + ) } } - pub fn atomic_rmw( - &self, - op: AtomicRmwBinOp, + fn atomic_rmw( + &mut self, + op: rustc_codegen_ssa::common::AtomicRmwBinOp, dst: &'ll Value, src: &'ll Value, - order: AtomicOrdering, + order: rustc_codegen_ssa::common::AtomicOrdering, ) -> &'ll Value { unsafe { - llvm::LLVMBuildAtomicRMW(self.llbuilder, op, dst, src, order, False) + llvm::LLVMBuildAtomicRMW( + self.llbuilder, + AtomicRmwBinOp::from_generic(op), + dst, + src, + AtomicOrdering::from_generic(order), + False) } } - pub fn atomic_fence(&self, order: AtomicOrdering, scope: SynchronizationScope) { + fn atomic_fence( + &mut self, + order: rustc_codegen_ssa::common::AtomicOrdering, + scope: rustc_codegen_ssa::common::SynchronizationScope + ) { unsafe { - llvm::LLVMRustBuildAtomicFence(self.llbuilder, order, scope); + llvm::LLVMRustBuildAtomicFence( + self.llbuilder, + AtomicOrdering::from_generic(order), + SynchronizationScope::from_generic(scope) + ); } } - pub fn add_case(&self, s: &'ll Value, on_val: &'ll Value, dest: &'ll BasicBlock) { + fn add_case(&mut self, s: &'ll Value, on_val: &'ll Value, dest: &'ll BasicBlock) { unsafe { llvm::LLVMAddCase(s, on_val, dest) } } - pub fn add_incoming_to_phi(&self, phi: &'ll Value, val: &'ll Value, bb: &'ll BasicBlock) { + fn add_incoming_to_phi(&mut self, phi: &'ll Value, val: &'ll Value, bb: &'ll BasicBlock) { self.count_insn("addincoming"); unsafe { llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint); } } - pub fn set_invariant_load(&self, load: &'ll Value) { + fn set_invariant_load(&mut self, load: &'ll Value) { unsafe { llvm::LLVMSetMetadata(load, llvm::MD_invariant_load as c_uint, llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0)); } } - /// Returns the ptr value that should be used for storing `val`. - fn check_store<'b>(&self, + fn check_store<'b>(&mut self, val: &'ll Value, ptr: &'ll Value) -> &'ll Value { - let dest_ptr_ty = val_ty(ptr); - let stored_ty = val_ty(val); - let stored_ptr_ty = stored_ty.ptr_to(); + let dest_ptr_ty = self.cx.val_ty(ptr); + let stored_ty = self.cx.val_ty(val); + let stored_ptr_ty = self.cx.type_ptr_to(stored_ty); - assert_eq!(dest_ptr_ty.kind(), llvm::TypeKind::Pointer); + assert_eq!(self.cx.type_kind(dest_ptr_ty), TypeKind::Pointer); if dest_ptr_ty == stored_ptr_ty { ptr @@ -1154,24 +1382,23 @@ impl Builder<'a, 'll, 'tcx> { } } - /// Returns the args that should be used for a call to `llfn`. 
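The calls to `AtomicOrdering::from_generic`, `AtomicRmwBinOp::from_generic`, and `SynchronizationScope::from_generic` above translate the backend-agnostic enums defined in `rustc_codegen_ssa` into the LLVM-specific enums the FFI expects. A minimal sketch of that mapping, with invented enum names and only a subset of the real variants:

```
#[derive(Clone, Copy, Debug, PartialEq)]
enum GenericOrdering { Relaxed, Acquire, Release, SeqCst } // backend-agnostic side

#[derive(Clone, Copy, Debug, PartialEq)]
enum LlvmOrdering { Monotonic, Acquire, Release, SequentiallyConsistent } // FFI side

impl LlvmOrdering {
    // One-way translation at the boundary, like the from_generic helpers above.
    fn from_generic(order: GenericOrdering) -> Self {
        match order {
            GenericOrdering::Relaxed => LlvmOrdering::Monotonic,
            GenericOrdering::Acquire => LlvmOrdering::Acquire,
            GenericOrdering::Release => LlvmOrdering::Release,
            GenericOrdering::SeqCst => LlvmOrdering::SequentiallyConsistent,
        }
    }
}

fn main() {
    assert_eq!(
        LlvmOrdering::from_generic(GenericOrdering::SeqCst),
        LlvmOrdering::SequentiallyConsistent
    );
}
```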
- fn check_call<'b>(&self, + fn check_call<'b>(&mut self, typ: &str, llfn: &'ll Value, args: &'b [&'ll Value]) -> Cow<'b, [&'ll Value]> { - let mut fn_ty = val_ty(llfn); + let mut fn_ty = self.cx.val_ty(llfn); // Strip off pointers - while fn_ty.kind() == llvm::TypeKind::Pointer { - fn_ty = fn_ty.element_type(); + while self.cx.type_kind(fn_ty) == TypeKind::Pointer { + fn_ty = self.cx.element_type(fn_ty); } - assert!(fn_ty.kind() == llvm::TypeKind::Function, + assert!(self.cx.type_kind(fn_ty) == TypeKind::Function, "builder::{} not passed a function, but {:?}", typ, fn_ty); - let param_tys = fn_ty.func_params(); + let param_tys = self.cx.func_params_types(fn_ty); let all_args_match = param_tys.iter() - .zip(args.iter().map(|&v| val_ty(v))) + .zip(args.iter().map(|&v| self.val_ty(v))) .all(|(expected_ty, actual_ty)| *expected_ty == actual_ty); if all_args_match { @@ -1182,7 +1409,7 @@ impl Builder<'a, 'll, 'tcx> { .zip(args.iter()) .enumerate() .map(|(i, (expected_ty, &actual_val))| { - let actual_ty = val_ty(actual_val); + let actual_ty = self.val_ty(actual_val); if expected_ty != actual_ty { debug!("Type mismatch in function call of {:?}. \ Expected {:?} for param {}, got {:?}; injecting bitcast", @@ -1197,23 +1424,77 @@ impl Builder<'a, 'll, 'tcx> { Cow::Owned(casted_args) } - pub fn lifetime_start(&self, ptr: &'ll Value, size: Size) { + fn lifetime_start(&mut self, ptr: &'ll Value, size: Size) { self.call_lifetime_intrinsic("llvm.lifetime.start", ptr, size); } - pub fn lifetime_end(&self, ptr: &'ll Value, size: Size) { + fn lifetime_end(&mut self, ptr: &'ll Value, size: Size) { self.call_lifetime_intrinsic("llvm.lifetime.end", ptr, size); } - /// If LLVM lifetime intrinsic support is enabled (i.e. optimizations - /// on), and `ptr` is nonzero-sized, then extracts the size of `ptr` - /// and the intrinsic for `lt` and passes them to `emit`, which is in - /// charge of generating code to call the passed intrinsic on whatever - /// block of generated code is targeted for the intrinsic. - /// - /// If LLVM lifetime intrinsic support is disabled (i.e. optimizations - /// off) or `ptr` is zero-sized, then no-op (does not call `emit`). 
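`check_call` above compares the callee's declared parameter types against the types of the supplied arguments and injects a bitcast for each mismatch instead of aborting. A toy version of that reconciliation over plain strings (the real code works on LLVM `Type` and `Value` handles):

```
// args are (type, value) pairs; param_tys are the callee's expected types.
fn check_call(param_tys: &[&str], args: &[(&str, &str)]) -> Vec<String> {
    param_tys
        .iter()
        .zip(args)
        .map(|(expected, (actual, val))| {
            if expected == actual {
                val.to_string()
            } else {
                // Plays the role of LLVMBuildBitCast in the real builder.
                format!("bitcast {} to {}", val, expected)
            }
        })
        .collect()
}

fn main() {
    let fixed = check_call(&["i8*", "i64"], &[("i32*", "%p"), ("i64", "%n")]);
    assert_eq!(fixed, vec!["bitcast %p to i8*".to_string(), "%n".to_string()]);
}
```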
- fn call_lifetime_intrinsic(&self, intrinsic: &str, ptr: &'ll Value, size: Size) { + fn call( + &mut self, + llfn: &'ll Value, + args: &[&'ll Value], + funclet: Option<&Funclet<'ll>>, + ) -> &'ll Value { + self.count_insn("call"); + + debug!("Call {:?} with args ({:?})", + llfn, + args); + + let args = self.check_call("call", llfn, args); + let bundle = funclet.map(|funclet| funclet.bundle()); + let bundle = bundle.as_ref().map(|b| &*b.raw); + + unsafe { + llvm::LLVMRustBuildCall( + self.llbuilder, + llfn, + args.as_ptr() as *const &llvm::Value, + args.len() as c_uint, + bundle, noname() + ) + } + } + + fn zext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + self.count_insn("zext"); + unsafe { + llvm::LLVMBuildZExt(self.llbuilder, val, dest_ty, noname()) + } + } + + fn struct_gep(&mut self, ptr: &'ll Value, idx: u64) -> &'ll Value { + self.count_insn("structgep"); + assert_eq!(idx as c_uint as u64, idx); + unsafe { + llvm::LLVMBuildStructGEP(self.llbuilder, ptr, idx as c_uint, noname()) + } + } + + fn cx(&self) -> &CodegenCx<'ll, 'tcx> { + self.cx + } + + unsafe fn delete_basic_block(&mut self, bb: &'ll BasicBlock) { + llvm::LLVMDeleteBasicBlock(bb); + } + + fn do_not_inline(&mut self, llret: &'ll Value) { + llvm::Attribute::NoInline.apply_callsite(llvm::AttributePlace::Function, llret); + } +} + +impl StaticBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> { + fn get_static(&self, def_id: DefId) -> &'ll Value { + self.cx().get_static(def_id) + } +} + +impl Builder<'a, 'll, 'tcx> { + fn call_lifetime_intrinsic(&mut self, intrinsic: &str, ptr: &'ll Value, size: Size) { if self.cx.sess().opts.optimize == config::OptLevel::No { return; } @@ -1225,7 +1506,7 @@ impl Builder<'a, 'll, 'tcx> { let lifetime_intrinsic = self.cx.get_intrinsic(intrinsic); - let ptr = self.pointercast(ptr, Type::i8p(self.cx)); - self.call(lifetime_intrinsic, &[C_u64(self.cx, size), ptr], None); + let ptr = self.pointercast(ptr, self.cx.type_i8p()); + self.call(lifetime_intrinsic, &[self.cx.const_u64(size), ptr], None); } } diff --git a/src/librustc_codegen_llvm/callee.rs b/src/librustc_codegen_llvm/callee.rs index 4b4ccb3b60..f13eeb6692 100644 --- a/src/librustc_codegen_llvm/callee.rs +++ b/src/librustc_codegen_llvm/callee.rs @@ -15,18 +15,14 @@ //! closure. use attributes; -use common::{self, CodegenCx}; -use consts; -use declare; use llvm; use monomorphize::Instance; -use type_of::LayoutLlvmExt; +use context::CodegenCx; use value::Value; +use rustc_codegen_ssa::traits::*; -use rustc::hir::def_id::DefId; -use rustc::ty::{self, TypeFoldable}; -use rustc::ty::layout::LayoutOf; -use rustc::ty::subst::Substs; +use rustc::ty::TypeFoldable; +use rustc::ty::layout::{LayoutOf, HasTyCtxt}; /// Codegens a reference to a fn/method item, monomorphizing and /// inlining as it goes. 
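`get_fn` below is keyed on an `Instance`, that is, a function item plus the concrete substitutions it is monomorphized with, and each distinct instance gets its own cached LLVM value. Conceptually (stand-in types, not the real `Instance` or cache):

```
use std::collections::HashMap;

// Stand-ins: a "def id" plus the concrete type arguments it was instantiated with.
#[derive(PartialEq, Eq, Hash, Clone)]
struct Instance {
    def_id: u32,
    substs: Vec<&'static str>,
}

struct Cx {
    // Mirrors `cx.instances`: one codegenned symbol per monomorphic instance.
    instances: HashMap<Instance, String>,
}

impl Cx {
    fn get_fn(&mut self, inst: Instance) -> &String {
        self.instances.entry(inst.clone()).or_insert_with(|| {
            format!("fn{}::<{}>", inst.def_id, inst.substs.join(", "))
        })
    }
}

fn main() {
    let mut cx = Cx { instances: HashMap::new() };
    let a = cx.get_fn(Instance { def_id: 7, substs: vec!["i32"] }).clone();
    let b = cx.get_fn(Instance { def_id: 7, substs: vec!["f64"] }).clone();
    assert_ne!(a, b); // same generic item, two monomorphic instances
}
```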
@@ -39,27 +35,27 @@ pub fn get_fn( cx: &CodegenCx<'ll, 'tcx>, instance: Instance<'tcx>, ) -> &'ll Value { - let tcx = cx.tcx; + let tcx = cx.tcx(); debug!("get_fn(instance={:?})", instance); assert!(!instance.substs.needs_infer()); - assert!(!instance.substs.has_escaping_regions()); + assert!(!instance.substs.has_escaping_bound_vars()); assert!(!instance.substs.has_param_types()); - let fn_ty = instance.ty(cx.tcx); - if let Some(&llfn) = cx.instances.borrow().get(&instance) { + let sig = instance.fn_sig(cx.tcx()); + if let Some(&llfn) = cx.instances().borrow().get(&instance) { return llfn; } let sym = tcx.symbol_name(instance).as_str(); - debug!("get_fn({:?}: {:?}) => {}", instance, fn_ty, sym); + debug!("get_fn({:?}: {:?}) => {}", instance, sig, sym); // Create a fn pointer with the substituted signature. - let fn_ptr_ty = tcx.mk_fn_ptr(common::ty_fn_sig(cx, fn_ty)); - let llptrty = cx.layout_of(fn_ptr_ty).llvm_type(cx); + let fn_ptr_ty = tcx.mk_fn_ptr(sig); + let llptrty = cx.backend_type(cx.layout_of(fn_ptr_ty)); - let llfn = if let Some(llfn) = declare::get_declared_value(cx, &sym) { + let llfn = if let Some(llfn) = cx.get_declared_value(&sym) { // This is subtle and surprising, but sometimes we have to bitcast // the resulting fn pointer. The reason has to do with external // functions. If you have two crates that both bind the same C @@ -83,16 +79,16 @@ pub fn get_fn( // This can occur on either a crate-local or crate-external // reference. It also occurs when testing libcore and in some // other weird situations. Annoying. - if common::val_ty(llfn) != llptrty { + if cx.val_ty(llfn) != llptrty { debug!("get_fn: casting {:?} to {:?}", llfn, llptrty); - consts::ptrcast(llfn, llptrty) + cx.const_ptrcast(llfn, llptrty) } else { debug!("get_fn: not casting pointer!"); llfn } } else { - let llfn = declare::declare_fn(cx, &sym, fn_ty); - assert_eq!(common::val_ty(llfn), llptrty); + let llfn = cx.declare_fn(&sym, sig); + assert_eq!(cx.val_ty(llfn), llptrty); debug!("get_fn: not casting pointer!"); if instance.def.is_inline(tcx) { @@ -204,19 +200,3 @@ pub fn get_fn( llfn } - -pub fn resolve_and_get_fn( - cx: &CodegenCx<'ll, 'tcx>, - def_id: DefId, - substs: &'tcx Substs<'tcx>, -) -> &'ll Value { - get_fn( - cx, - ty::Instance::resolve( - cx.tcx, - ty::ParamEnv::reveal_all(), - def_id, - substs - ).unwrap() - ) -} diff --git a/src/librustc_codegen_llvm/common.rs b/src/librustc_codegen_llvm/common.rs index c08937fa9b..fd13421835 100644 --- a/src/librustc_codegen_llvm/common.rs +++ b/src/librustc_codegen_llvm/common.rs @@ -12,44 +12,26 @@ //! Code that is useful in various codegen modules. 
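The comment in `get_fn` above explains why the declared value can end up with the "wrong" LLVM type: two crates may bind the same C symbol with different Rust signatures, LLVM keeps a single declaration per symbol, and the losing view has to bitcast the fn pointer. Illustration only (rustc nowadays warns about such clashing extern declarations):

```
// Crate A binds the C function one way ...
mod crate_a {
    extern "C" {
        pub fn the_c_fn(arg: i32) -> i32;
    }
}

// ... while crate B binds the very same symbol with a different Rust signature.
mod crate_b {
    extern "C" {
        pub fn the_c_fn(arg: *const u8) -> i32;
    }
}

fn main() {
    // Only one LLVM declaration exists for `the_c_fn`, so whichever view was not
    // declared first gets a value of the "wrong" pointer type, and the codegen
    // above reconciles the two with a pointer bitcast.
}
```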
-use llvm::{self, TypeKind}; -use llvm::{True, False, Bool, OperandBundleDef}; -use rustc::hir::def_id::DefId; -use rustc::middle::lang_items::LangItem; +use llvm::{self, True, False, Bool, BasicBlock, OperandBundleDef}; use abi; -use base; -use builder::Builder; use consts; -use declare; use type_::Type; use type_of::LayoutLlvmExt; use value::Value; +use rustc_codegen_ssa::traits::*; -use rustc::ty::{self, Ty, TyCtxt}; -use rustc::ty::layout::{HasDataLayout, LayoutOf}; -use rustc::hir; +use rustc::ty::layout::{HasDataLayout, LayoutOf, self, TyLayout, Size}; +use rustc::mir::interpret::{Scalar, AllocType, Allocation}; +use consts::const_alloc_to_llvm; +use rustc_codegen_ssa::mir::place::PlaceRef; use libc::{c_uint, c_char}; -use std::iter; -use rustc_target::spec::abi::Abi; use syntax::symbol::LocalInternedString; -use syntax_pos::{Span, DUMMY_SP}; +use syntax::ast::Mutability; pub use context::CodegenCx; -pub fn type_needs_drop<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool { - ty.needs_drop(tcx, ty::ParamEnv::reveal_all()) -} - -pub fn type_is_sized<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool { - ty.is_sized(tcx.at(DUMMY_SP), ty::ParamEnv::reveal_all()) -} - -pub fn type_is_freeze<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool { - ty.is_freeze(tcx, ty::ParamEnv::reveal_all(), DUMMY_SP) -} - /* * A note on nomenclature of linking: "extern", "foreign", and "upcall". * @@ -112,128 +94,302 @@ impl Funclet<'ll> { } } +impl BackendTypes for CodegenCx<'ll, 'tcx> { + type Value = &'ll Value; + type BasicBlock = &'ll BasicBlock; + type Type = &'ll Type; + type Funclet = Funclet<'ll>; + + type DIScope = &'ll llvm::debuginfo::DIScope; +} + +impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> { + fn const_null(&self, t: &'ll Type) -> &'ll Value { + unsafe { + llvm::LLVMConstNull(t) + } + } + + fn const_undef(&self, t: &'ll Type) -> &'ll Value { + unsafe { + llvm::LLVMGetUndef(t) + } + } + + fn const_int(&self, t: &'ll Type, i: i64) -> &'ll Value { + unsafe { + llvm::LLVMConstInt(t, i as u64, True) + } + } + + fn const_uint(&self, t: &'ll Type, i: u64) -> &'ll Value { + unsafe { + llvm::LLVMConstInt(t, i, False) + } + } + + fn const_uint_big(&self, t: &'ll Type, u: u128) -> &'ll Value { + unsafe { + let words = [u as u64, (u >> 64) as u64]; + llvm::LLVMConstIntOfArbitraryPrecision(t, 2, words.as_ptr()) + } + } + + fn const_bool(&self, val: bool) -> &'ll Value { + self.const_uint(self.type_i1(), val as u64) + } + + fn const_i32(&self, i: i32) -> &'ll Value { + self.const_int(self.type_i32(), i as i64) + } + + fn const_u32(&self, i: u32) -> &'ll Value { + self.const_uint(self.type_i32(), i as u64) + } + + fn const_u64(&self, i: u64) -> &'ll Value { + self.const_uint(self.type_i64(), i) + } + + fn const_usize(&self, i: u64) -> &'ll Value { + let bit_size = self.data_layout().pointer_size.bits(); + if bit_size < 64 { + // make sure it doesn't overflow + assert!(i < (1< &'ll Value { + self.const_uint(self.type_i8(), i as u64) + } + + fn const_cstr( + &self, + s: LocalInternedString, + null_terminated: bool, + ) -> &'ll Value { + unsafe { + if let Some(&llval) = self.const_cstr_cache.borrow().get(&s) { + return llval; + } + + let sc = llvm::LLVMConstStringInContext(self.llcx, + s.as_ptr() as *const c_char, + s.len() as c_uint, + !null_terminated as Bool); + let sym = self.generate_local_symbol_name("str"); + let g = self.define_global(&sym[..], self.val_ty(sc)).unwrap_or_else(||{ + bug!("symbol `{}` is already defined", sym); + }); + llvm::LLVMSetInitializer(g, 
sc); + llvm::LLVMSetGlobalConstant(g, True); + llvm::LLVMRustSetLinkage(g, llvm::Linkage::InternalLinkage); + + self.const_cstr_cache.borrow_mut().insert(s, g); + g + } + } + + fn const_str_slice(&self, s: LocalInternedString) -> &'ll Value { + let len = s.len(); + let cs = consts::ptrcast(self.const_cstr(s, false), + self.type_ptr_to(self.layout_of(self.tcx.mk_str()).llvm_type(self))); + self.const_fat_ptr(cs, self.const_usize(len as u64)) + } + + fn const_fat_ptr( + &self, + ptr: &'ll Value, + meta: &'ll Value + ) -> &'ll Value { + assert_eq!(abi::FAT_PTR_ADDR, 0); + assert_eq!(abi::FAT_PTR_EXTRA, 1); + self.const_struct(&[ptr, meta], false) + } + + fn const_struct( + &self, + elts: &[&'ll Value], + packed: bool + ) -> &'ll Value { + struct_in_context(self.llcx, elts, packed) + } + + fn const_array(&self, ty: &'ll Type, elts: &[&'ll Value]) -> &'ll Value { + unsafe { + return llvm::LLVMConstArray(ty, elts.as_ptr(), elts.len() as c_uint); + } + } + + fn const_vector(&self, elts: &[&'ll Value]) -> &'ll Value { + unsafe { + return llvm::LLVMConstVector(elts.as_ptr(), elts.len() as c_uint); + } + } + + fn const_bytes(&self, bytes: &[u8]) -> &'ll Value { + bytes_in_context(self.llcx, bytes) + } + + fn const_get_elt(&self, v: &'ll Value, idx: u64) -> &'ll Value { + unsafe { + assert_eq!(idx as c_uint as u64, idx); + let us = &[idx as c_uint]; + let r = llvm::LLVMConstExtractValue(v, us.as_ptr(), us.len() as c_uint); + + debug!("const_get_elt(v={:?}, idx={}, r={:?})", + v, idx, r); + + r + } + } + + fn const_get_real(&self, v: &'ll Value) -> Option<(f64, bool)> { + unsafe { + if self.is_const_real(v) { + let mut loses_info: llvm::Bool = ::std::mem::uninitialized(); + let r = llvm::LLVMConstRealGetDouble(v, &mut loses_info); + let loses_info = if loses_info == 1 { true } else { false }; + Some((r, loses_info)) + } else { + None + } + } + } + + fn const_to_uint(&self, v: &'ll Value) -> u64 { + unsafe { + llvm::LLVMConstIntGetZExtValue(v) + } + } + + fn is_const_integral(&self, v: &'ll Value) -> bool { + unsafe { + llvm::LLVMIsAConstantInt(v).is_some() + } + } + + fn is_const_real(&self, v: &'ll Value) -> bool { + unsafe { + llvm::LLVMIsAConstantFP(v).is_some() + } + } + + fn const_to_opt_u128(&self, v: &'ll Value, sign_ext: bool) -> Option { + unsafe { + if self.is_const_integral(v) { + let (mut lo, mut hi) = (0u64, 0u64); + let success = llvm::LLVMRustConstInt128Get(v, sign_ext, + &mut hi, &mut lo); + if success { + Some(hi_lo_to_u128(lo, hi)) + } else { + None + } + } else { + None + } + } + } + + fn scalar_to_backend( + &self, + cv: Scalar, + layout: &layout::Scalar, + llty: &'ll Type, + ) -> &'ll Value { + let bitsize = if layout.is_bool() { 1 } else { layout.value.size(self).bits() }; + match cv { + Scalar::Bits { size: 0, .. 
} => { + assert_eq!(0, layout.value.size(self).bytes()); + self.const_undef(self.type_ix(0)) + }, + Scalar::Bits { bits, size } => { + assert_eq!(size as u64, layout.value.size(self).bytes()); + let llval = self.const_uint_big(self.type_ix(bitsize), bits); + if layout.value == layout::Pointer { + unsafe { llvm::LLVMConstIntToPtr(llval, llty) } + } else { + self.const_bitcast(llval, llty) + } + }, + Scalar::Ptr(ptr) => { + let alloc_type = self.tcx.alloc_map.lock().get(ptr.alloc_id); + let base_addr = match alloc_type { + Some(AllocType::Memory(alloc)) => { + let init = const_alloc_to_llvm(self, alloc); + if alloc.mutability == Mutability::Mutable { + self.static_addr_of_mut(init, alloc.align, None) + } else { + self.static_addr_of(init, alloc.align, None) + } + } + Some(AllocType::Function(fn_instance)) => { + self.get_fn(fn_instance) + } + Some(AllocType::Static(def_id)) => { + assert!(self.tcx.is_static(def_id).is_some()); + self.get_static(def_id) + } + None => bug!("missing allocation {:?}", ptr.alloc_id), + }; + let llval = unsafe { llvm::LLVMConstInBoundsGEP( + self.const_bitcast(base_addr, self.type_i8p()), + &self.const_usize(ptr.offset.bytes()), + 1, + ) }; + if layout.value != layout::Pointer { + unsafe { llvm::LLVMConstPtrToInt(llval, llty) } + } else { + self.const_bitcast(llval, llty) + } + } + } + } + + fn from_const_alloc( + &self, + layout: TyLayout<'tcx>, + alloc: &Allocation, + offset: Size, + ) -> PlaceRef<'tcx, &'ll Value> { + let init = const_alloc_to_llvm(self, alloc); + let base_addr = self.static_addr_of(init, layout.align.abi, None); + + let llval = unsafe { llvm::LLVMConstInBoundsGEP( + self.const_bitcast(base_addr, self.type_i8p()), + &self.const_usize(offset.bytes()), + 1, + )}; + let llval = self.const_bitcast(llval, self.type_ptr_to(layout.llvm_type(self))); + PlaceRef::new_sized(llval, layout, alloc.align) + } + + fn const_ptrcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value { + consts::ptrcast(val, ty) + } +} + pub fn val_ty(v: &'ll Value) -> &'ll Type { unsafe { llvm::LLVMTypeOf(v) } } -// LLVM constant constructors. 
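Several of the constant helpers above move 128-bit values across an interface that only speaks 64-bit words: `const_uint_big` splits a `u128` into a low and a high word, `hi_lo_to_u128` (kept further down in this file) reassembles them, and `const_usize` asserts that the value fits the target's pointer width. A quick self-contained check of both:

```
fn split_u128(u: u128) -> [u64; 2] {
    // Low word first, then high word, as in `const_uint_big`.
    [u as u64, (u >> 64) as u64]
}

fn hi_lo_to_u128(lo: u64, hi: u64) -> u128 {
    ((hi as u128) << 64) | (lo as u128)
}

fn main() {
    let v: u128 = 0x0123_4567_89ab_cdef_fedc_ba98_7654_3210;
    let [lo, hi] = split_u128(v);
    assert_eq!(hi_lo_to_u128(lo, hi), v);

    // `const_usize` on a target whose pointer width is below 64 bits asserts
    // that the value fits, i.e. i < (1 << bit_size); e.g. for a 32-bit target:
    let bit_size = 32u32;
    let i: u64 = 0xffff_ffff;
    assert!(i < (1u64 << bit_size));
}
```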
-pub fn C_null(t: &'ll Type) -> &'ll Value { +pub fn bytes_in_context(llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value { unsafe { - llvm::LLVMConstNull(t) + let ptr = bytes.as_ptr() as *const c_char; + return llvm::LLVMConstStringInContext(llcx, ptr, bytes.len() as c_uint, True); } } -pub fn C_undef(t: &'ll Type) -> &'ll Value { - unsafe { - llvm::LLVMGetUndef(t) - } -} - -pub fn C_int(t: &'ll Type, i: i64) -> &'ll Value { - unsafe { - llvm::LLVMConstInt(t, i as u64, True) - } -} - -pub fn C_uint(t: &'ll Type, i: u64) -> &'ll Value { - unsafe { - llvm::LLVMConstInt(t, i, False) - } -} - -pub fn C_uint_big(t: &'ll Type, u: u128) -> &'ll Value { - unsafe { - let words = [u as u64, (u >> 64) as u64]; - llvm::LLVMConstIntOfArbitraryPrecision(t, 2, words.as_ptr()) - } -} - -pub fn C_bool(cx: &CodegenCx<'ll, '_>, val: bool) -> &'ll Value { - C_uint(Type::i1(cx), val as u64) -} - -pub fn C_i32(cx: &CodegenCx<'ll, '_>, i: i32) -> &'ll Value { - C_int(Type::i32(cx), i as i64) -} - -pub fn C_u32(cx: &CodegenCx<'ll, '_>, i: u32) -> &'ll Value { - C_uint(Type::i32(cx), i as u64) -} - -pub fn C_u64(cx: &CodegenCx<'ll, '_>, i: u64) -> &'ll Value { - C_uint(Type::i64(cx), i) -} - -pub fn C_usize(cx: &CodegenCx<'ll, '_>, i: u64) -> &'ll Value { - let bit_size = cx.data_layout().pointer_size.bits(); - if bit_size < 64 { - // make sure it doesn't overflow - assert!(i < (1<, i: u8) -> &'ll Value { - C_uint(Type::i8(cx), i as u64) -} - - -// This is a 'c-like' raw string, which differs from -// our boxed-and-length-annotated strings. -pub fn C_cstr( - cx: &CodegenCx<'ll, '_>, - s: LocalInternedString, - null_terminated: bool, -) -> &'ll Value { - unsafe { - if let Some(&llval) = cx.const_cstr_cache.borrow().get(&s) { - return llval; - } - - let sc = llvm::LLVMConstStringInContext(cx.llcx, - s.as_ptr() as *const c_char, - s.len() as c_uint, - !null_terminated as Bool); - let sym = cx.generate_local_symbol_name("str"); - let g = declare::define_global(cx, &sym[..], val_ty(sc)).unwrap_or_else(||{ - bug!("symbol `{}` is already defined", sym); - }); - llvm::LLVMSetInitializer(g, sc); - llvm::LLVMSetGlobalConstant(g, True); - llvm::LLVMRustSetLinkage(g, llvm::Linkage::InternalLinkage); - - cx.const_cstr_cache.borrow_mut().insert(s, g); - g - } -} - -// NB: Do not use `do_spill_noroot` to make this into a constant string, or -// you will be kicked off fast isel. See issue #4352 for an example of this. 
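The constant-string constructors in this hunk (and their `const_cstr`/`const_str_slice` replacements above) encode the distinction the comments describe: a "c-like" string is just a possibly NUL-terminated byte pointer, while a Rust string-slice constant is a fat pointer, a (data pointer, length) pair at indices `FAT_PTR_ADDR` and `FAT_PTR_EXTRA`. A rough illustration of the two shapes:

```
use std::ffi::CString;

// Conceptual stand-in for the pair built by `const_fat_ptr`:
// field 0 is the data pointer (FAT_PTR_ADDR), field 1 the length (FAT_PTR_EXTRA).
#[repr(C)]
struct RawSlice {
    ptr: *const u8,
    len: usize,
}

fn main() {
    let s = "hello";

    // Rust-side view: a fat pointer carrying the length explicitly.
    let fat = RawSlice { ptr: s.as_ptr(), len: s.len() };
    assert!(!fat.ptr.is_null());
    assert_eq!(fat.len, 5);

    // C-like view: a NUL-terminated buffer with no length field.
    let c = CString::new(s).unwrap();
    assert_eq!(c.as_bytes_with_nul().last(), Some(&0u8));
}
```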
-pub fn C_str_slice(cx: &CodegenCx<'ll, '_>, s: LocalInternedString) -> &'ll Value { - let len = s.len(); - let cs = consts::ptrcast(C_cstr(cx, s, false), - cx.layout_of(cx.tcx.mk_str()).llvm_type(cx).ptr_to()); - C_fat_ptr(cx, cs, C_usize(cx, len as u64)) -} - -pub fn C_fat_ptr(cx: &CodegenCx<'ll, '_>, ptr: &'ll Value, meta: &'ll Value) -> &'ll Value { - assert_eq!(abi::FAT_PTR_ADDR, 0); - assert_eq!(abi::FAT_PTR_EXTRA, 1); - C_struct(cx, &[ptr, meta], false) -} - -pub fn C_struct(cx: &CodegenCx<'ll, '_>, elts: &[&'ll Value], packed: bool) -> &'ll Value { - C_struct_in_context(cx.llcx, elts, packed) -} - -pub fn C_struct_in_context( - llcx: &'ll llvm::Context, - elts: &[&'ll Value], +pub fn struct_in_context( + llcx: &'a llvm::Context, + elts: &[&'a Value], packed: bool, -) -> &'ll Value { +) -> &'a Value { unsafe { llvm::LLVMConstStructInContext(llcx, elts.as_ptr(), elts.len() as c_uint, @@ -241,215 +397,7 @@ pub fn C_struct_in_context( } } -pub fn C_array(ty: &'ll Type, elts: &[&'ll Value]) -> &'ll Value { - unsafe { - return llvm::LLVMConstArray(ty, elts.as_ptr(), elts.len() as c_uint); - } -} - -pub fn C_vector(elts: &[&'ll Value]) -> &'ll Value { - unsafe { - return llvm::LLVMConstVector(elts.as_ptr(), elts.len() as c_uint); - } -} - -pub fn C_bytes(cx: &CodegenCx<'ll, '_>, bytes: &[u8]) -> &'ll Value { - C_bytes_in_context(cx.llcx, bytes) -} - -pub fn C_bytes_in_context(llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value { - unsafe { - let ptr = bytes.as_ptr() as *const c_char; - return llvm::LLVMConstStringInContext(llcx, ptr, bytes.len() as c_uint, True); - } -} - -pub fn const_get_elt(v: &'ll Value, idx: u64) -> &'ll Value { - unsafe { - assert_eq!(idx as c_uint as u64, idx); - let us = &[idx as c_uint]; - let r = llvm::LLVMConstExtractValue(v, us.as_ptr(), us.len() as c_uint); - - debug!("const_get_elt(v={:?}, idx={}, r={:?})", - v, idx, r); - - r - } -} - -pub fn const_get_real(v: &'ll Value) -> Option<(f64, bool)> { - unsafe { - if is_const_real(v) { - let mut loses_info: llvm::Bool = ::std::mem::uninitialized(); - let r = llvm::LLVMConstRealGetDouble(v, &mut loses_info); - let loses_info = if loses_info == 1 { true } else { false }; - Some((r, loses_info)) - } else { - None - } - } -} - -pub fn const_to_uint(v: &'ll Value) -> u64 { - unsafe { - llvm::LLVMConstIntGetZExtValue(v) - } -} - -pub fn is_const_integral(v: &'ll Value) -> bool { - unsafe { - llvm::LLVMIsAConstantInt(v).is_some() - } -} - -pub fn is_const_real(v: &'ll Value) -> bool { - unsafe { - llvm::LLVMIsAConstantFP(v).is_some() - } -} - - #[inline] fn hi_lo_to_u128(lo: u64, hi: u64) -> u128 { ((hi as u128) << 64) | (lo as u128) } - -pub fn const_to_opt_u128(v: &'ll Value, sign_ext: bool) -> Option { - unsafe { - if is_const_integral(v) { - let (mut lo, mut hi) = (0u64, 0u64); - let success = llvm::LLVMRustConstInt128Get(v, sign_ext, - &mut hi, &mut lo); - if success { - Some(hi_lo_to_u128(lo, hi)) - } else { - None - } - } else { - None - } - } -} - -pub fn langcall(tcx: TyCtxt, - span: Option, - msg: &str, - li: LangItem) - -> DefId { - tcx.lang_items().require(li).unwrap_or_else(|s| { - let msg = format!("{} {}", msg, s); - match span { - Some(span) => tcx.sess.span_fatal(span, &msg[..]), - None => tcx.sess.fatal(&msg[..]), - } - }) -} - -// To avoid UB from LLVM, these two functions mask RHS with an -// appropriate mask unconditionally (i.e. the fallback behavior for -// all shifts). For 32- and 64-bit types, this matches the semantics -// of Java. (See related discussion on #1877 and #10183.) 
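The comment just above spells out the shift lowering: the right-hand side is masked with `bit width - 1` unconditionally, so an over-wide shift count can never trigger LLVM's undefined behaviour, matching Java's semantics (and Rust's own `wrapping_shl`). A tiny worked example of the mask:

```
fn masked_shl(lhs: u32, rhs: u32) -> u32 {
    // i8/u8 can shift by at most 7, i16/u16 by at most 15, ..., u32 by at most 31.
    let mask = u32::BITS - 1; // 31 for a 32-bit integer
    lhs << (rhs & mask)
}

fn main() {
    // A shift amount of 33 is reduced to 33 & 31 == 1 instead of being UB.
    assert_eq!(masked_shl(1, 33), 2);
    assert_eq!(masked_shl(1, 33), 1u32.wrapping_shl(33));
}
```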
- -pub fn build_unchecked_lshift( - bx: &Builder<'a, 'll, 'tcx>, - lhs: &'ll Value, - rhs: &'ll Value -) -> &'ll Value { - let rhs = base::cast_shift_expr_rhs(bx, hir::BinOpKind::Shl, lhs, rhs); - // #1877, #10183: Ensure that input is always valid - let rhs = shift_mask_rhs(bx, rhs); - bx.shl(lhs, rhs) -} - -pub fn build_unchecked_rshift( - bx: &Builder<'a, 'll, 'tcx>, lhs_t: Ty<'tcx>, lhs: &'ll Value, rhs: &'ll Value -) -> &'ll Value { - let rhs = base::cast_shift_expr_rhs(bx, hir::BinOpKind::Shr, lhs, rhs); - // #1877, #10183: Ensure that input is always valid - let rhs = shift_mask_rhs(bx, rhs); - let is_signed = lhs_t.is_signed(); - if is_signed { - bx.ashr(lhs, rhs) - } else { - bx.lshr(lhs, rhs) - } -} - -fn shift_mask_rhs(bx: &Builder<'a, 'll, 'tcx>, rhs: &'ll Value) -> &'ll Value { - let rhs_llty = val_ty(rhs); - bx.and(rhs, shift_mask_val(bx, rhs_llty, rhs_llty, false)) -} - -pub fn shift_mask_val( - bx: &Builder<'a, 'll, 'tcx>, - llty: &'ll Type, - mask_llty: &'ll Type, - invert: bool -) -> &'ll Value { - let kind = llty.kind(); - match kind { - TypeKind::Integer => { - // i8/u8 can shift by at most 7, i16/u16 by at most 15, etc. - let val = llty.int_width() - 1; - if invert { - C_int(mask_llty, !val as i64) - } else { - C_uint(mask_llty, val) - } - }, - TypeKind::Vector => { - let mask = shift_mask_val(bx, llty.element_type(), mask_llty.element_type(), invert); - bx.vector_splat(mask_llty.vector_length(), mask) - }, - _ => bug!("shift_mask_val: expected Integer or Vector, found {:?}", kind), - } -} - -pub fn ty_fn_sig<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, - ty: Ty<'tcx>) - -> ty::PolyFnSig<'tcx> -{ - match ty.sty { - ty::FnDef(..) | - // Shims currently have type FnPtr. Not sure this should remain. - ty::FnPtr(_) => ty.fn_sig(cx.tcx), - ty::Closure(def_id, substs) => { - let tcx = cx.tcx; - let sig = substs.closure_sig(def_id, tcx); - - let env_ty = tcx.closure_env_ty(def_id, substs).unwrap(); - sig.map_bound(|sig| tcx.mk_fn_sig( - iter::once(*env_ty.skip_binder()).chain(sig.inputs().iter().cloned()), - sig.output(), - sig.variadic, - sig.unsafety, - sig.abi - )) - } - ty::Generator(def_id, substs, _) => { - let tcx = cx.tcx; - let sig = substs.poly_sig(def_id, cx.tcx); - - let env_region = ty::ReLateBound(ty::INNERMOST, ty::BrEnv); - let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty); - - sig.map_bound(|sig| { - let state_did = tcx.lang_items().gen_state().unwrap(); - let state_adt_ref = tcx.adt_def(state_did); - let state_substs = tcx.intern_substs(&[ - sig.yield_ty.into(), - sig.return_ty.into(), - ]); - let ret_ty = tcx.mk_adt(state_adt_ref, state_substs); - - tcx.mk_fn_sig(iter::once(env_ty), - ret_ty, - false, - hir::Unsafety::Normal, - Abi::Rust - ) - }) - } - _ => bug!("unexpected type {:?} to ty_fn_sig", ty) - } -} diff --git a/src/librustc_codegen_llvm/consts.rs b/src/librustc_codegen_llvm/consts.rs index 9228870bf3..5311a6a373 100644 --- a/src/librustc_codegen_llvm/consts.rs +++ b/src/librustc_codegen_llvm/consts.rs @@ -11,36 +11,80 @@ use libc::c_uint; use llvm::{self, SetUnnamedAddr, True}; use rustc::hir::def_id::DefId; +use rustc::mir::interpret::{ConstValue, Allocation, read_target_uint, + Pointer, ErrorHandled, GlobalId}; use rustc::hir::Node; use debuginfo; -use base; use monomorphize::MonoItem; -use common::{CodegenCx, val_ty}; -use declare; +use common::CodegenCx; use monomorphize::Instance; use syntax_pos::Span; +use rustc_target::abi::HasDataLayout; use syntax_pos::symbol::LocalInternedString; +use base; use type_::Type; use type_of::LayoutLlvmExt; 
use value::Value; use rustc::ty::{self, Ty}; +use rustc_codegen_ssa::traits::*; -use rustc::ty::layout::{Align, LayoutOf}; +use rustc::ty::layout::{self, Size, Align, LayoutOf}; use rustc::hir::{self, CodegenFnAttrs, CodegenFnAttrFlags}; use std::ffi::{CStr, CString}; -pub fn ptrcast(val: &'ll Value, ty: &'ll Type) -> &'ll Value { - unsafe { - llvm::LLVMConstPointerCast(val, ty) +pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll Value { + let mut llvals = Vec::with_capacity(alloc.relocations.len() + 1); + let dl = cx.data_layout(); + let pointer_size = dl.pointer_size.bytes() as usize; + + let mut next_offset = 0; + for &(offset, ((), alloc_id)) in alloc.relocations.iter() { + let offset = offset.bytes(); + assert_eq!(offset as usize as u64, offset); + let offset = offset as usize; + if offset > next_offset { + llvals.push(cx.const_bytes(&alloc.bytes[next_offset..offset])); + } + let ptr_offset = read_target_uint( + dl.endian, + &alloc.bytes[offset..(offset + pointer_size)], + ).expect("const_alloc_to_llvm: could not read relocation pointer") as u64; + llvals.push(cx.scalar_to_backend( + Pointer::new(alloc_id, Size::from_bytes(ptr_offset)).into(), + &layout::Scalar { + value: layout::Primitive::Pointer, + valid_range: 0..=!0 + }, + cx.type_i8p() + )); + next_offset = offset + pointer_size; } + if alloc.bytes.len() >= next_offset { + llvals.push(cx.const_bytes(&alloc.bytes[next_offset ..])); + } + + cx.const_struct(&llvals, true) } -pub fn bitcast(val: &'ll Value, ty: &'ll Type) -> &'ll Value { - unsafe { - llvm::LLVMConstBitCast(val, ty) - } +pub fn codegen_static_initializer( + cx: &CodegenCx<'ll, 'tcx>, + def_id: DefId, +) -> Result<(&'ll Value, &'tcx Allocation), ErrorHandled> { + let instance = ty::Instance::mono(cx.tcx, def_id); + let cid = GlobalId { + instance, + promoted: None, + }; + let param_env = ty::ParamEnv::reveal_all(); + let static_ = cx.tcx.const_eval(param_env.and(cid))?; + + let alloc = match static_.val { + ConstValue::ByRef(_, alloc, n) if n.bytes() == 0 => alloc, + _ => bug!("static const eval returned {:#?}", static_), + }; + Ok((const_alloc_to_llvm(cx, alloc), alloc)) } fn set_global_alignment(cx: &CodegenCx<'ll, '_>, @@ -50,7 +94,7 @@ fn set_global_alignment(cx: &CodegenCx<'ll, '_>, // Note: GCC and Clang also allow `__attribute__((aligned))` on variables, // which can force it to be smaller. Rust doesn't support this yet. 
if let Some(min) = cx.sess().target.target.options.min_global_align { - match ty::layout::Align::from_bits(min, min) { + match Align::from_bits(min) { Ok(min) => align = align.max(min), Err(err) => { cx.sess().err(&format!("invalid minimum global alignment: {}", err)); @@ -58,181 +102,10 @@ fn set_global_alignment(cx: &CodegenCx<'ll, '_>, } } unsafe { - llvm::LLVMSetAlignment(gv, align.abi() as u32); + llvm::LLVMSetAlignment(gv, align.bytes() as u32); } } -pub fn addr_of_mut( - cx: &CodegenCx<'ll, '_>, - cv: &'ll Value, - align: Align, - kind: Option<&str>, -) -> &'ll Value { - unsafe { - let gv = match kind { - Some(kind) if !cx.tcx.sess.fewer_names() => { - let name = cx.generate_local_symbol_name(kind); - let gv = declare::define_global(cx, &name[..], val_ty(cv)).unwrap_or_else(||{ - bug!("symbol `{}` is already defined", name); - }); - llvm::LLVMRustSetLinkage(gv, llvm::Linkage::PrivateLinkage); - gv - }, - _ => declare::define_private_global(cx, val_ty(cv)), - }; - llvm::LLVMSetInitializer(gv, cv); - set_global_alignment(cx, gv, align); - SetUnnamedAddr(gv, true); - gv - } -} - -pub fn addr_of( - cx: &CodegenCx<'ll, '_>, - cv: &'ll Value, - align: Align, - kind: Option<&str>, -) -> &'ll Value { - if let Some(&gv) = cx.const_globals.borrow().get(&cv) { - unsafe { - // Upgrade the alignment in cases where the same constant is used with different - // alignment requirements - let llalign = align.abi() as u32; - if llalign > llvm::LLVMGetAlignment(gv) { - llvm::LLVMSetAlignment(gv, llalign); - } - } - return gv; - } - let gv = addr_of_mut(cx, cv, align, kind); - unsafe { - llvm::LLVMSetGlobalConstant(gv, True); - } - cx.const_globals.borrow_mut().insert(cv, gv); - gv -} - -pub fn get_static(cx: &CodegenCx<'ll, '_>, def_id: DefId) -> &'ll Value { - let instance = Instance::mono(cx.tcx, def_id); - if let Some(&g) = cx.instances.borrow().get(&instance) { - return g; - } - - let defined_in_current_codegen_unit = cx.codegen_unit - .items() - .contains_key(&MonoItem::Static(def_id)); - assert!(!defined_in_current_codegen_unit, - "consts::get_static() should always hit the cache for \ - statics defined in the same CGU, but did not for `{:?}`", - def_id); - - let ty = instance.ty(cx.tcx); - let sym = cx.tcx.symbol_name(instance).as_str(); - - debug!("get_static: sym={} instance={:?}", sym, instance); - - let g = if let Some(id) = cx.tcx.hir.as_local_node_id(def_id) { - - let llty = cx.layout_of(ty).llvm_type(cx); - let (g, attrs) = match cx.tcx.hir.get(id) { - Node::Item(&hir::Item { - ref attrs, span, node: hir::ItemKind::Static(..), .. - }) => { - if declare::get_declared_value(cx, &sym[..]).is_some() { - span_bug!(span, "Conflicting symbol names for static?"); - } - - let g = declare::define_global(cx, &sym[..], llty).unwrap(); - - if !cx.tcx.is_reachable_non_generic(def_id) { - unsafe { - llvm::LLVMRustSetVisibility(g, llvm::Visibility::Hidden); - } - } - - (g, attrs) - } - - Node::ForeignItem(&hir::ForeignItem { - ref attrs, span, node: hir::ForeignItemKind::Static(..), .. - }) => { - let fn_attrs = cx.tcx.codegen_fn_attrs(def_id); - (check_and_apply_linkage(cx, &fn_attrs, ty, sym, Some(span)), attrs) - } - - item => bug!("get_static: expected static, found {:?}", item) - }; - - debug!("get_static: sym={} attrs={:?}", sym, attrs); - - for attr in attrs { - if attr.check_name("thread_local") { - llvm::set_thread_local_mode(g, cx.tls_model); - } - } - - g - } else { - // FIXME(nagisa): perhaps the map of externs could be offloaded to llvm somehow? 
- debug!("get_static: sym={} item_attr={:?}", sym, cx.tcx.item_attrs(def_id)); - - let attrs = cx.tcx.codegen_fn_attrs(def_id); - let g = check_and_apply_linkage(cx, &attrs, ty, sym, None); - - // Thread-local statics in some other crate need to *always* be linked - // against in a thread-local fashion, so we need to be sure to apply the - // thread-local attribute locally if it was present remotely. If we - // don't do this then linker errors can be generated where the linker - // complains that one object files has a thread local version of the - // symbol and another one doesn't. - if attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) { - llvm::set_thread_local_mode(g, cx.tls_model); - } - - let needs_dll_storage_attr = - cx.use_dll_storage_attrs && !cx.tcx.is_foreign_item(def_id) && - // ThinLTO can't handle this workaround in all cases, so we don't - // emit the attrs. Instead we make them unnecessary by disallowing - // dynamic linking when cross-language LTO is enabled. - !cx.tcx.sess.opts.debugging_opts.cross_lang_lto.enabled(); - - // If this assertion triggers, there's something wrong with commandline - // argument validation. - debug_assert!(!(cx.tcx.sess.opts.debugging_opts.cross_lang_lto.enabled() && - cx.tcx.sess.target.target.options.is_like_msvc && - cx.tcx.sess.opts.cg.prefer_dynamic)); - - if needs_dll_storage_attr { - // This item is external but not foreign, i.e. it originates from an external Rust - // crate. Since we don't know whether this crate will be linked dynamically or - // statically in the final application, we always mark such symbols as 'dllimport'. - // If final linkage happens to be static, we rely on compiler-emitted __imp_ stubs to - // make things work. - // - // However, in some scenarios we defer emission of statics to downstream - // crates, so there are cases where a static with an upstream DefId - // is actually present in the current crate. We can find out via the - // is_codegened_item query. - if !cx.tcx.is_codegened_item(def_id) { - unsafe { - llvm::LLVMSetDLLStorageClass(g, llvm::DLLStorageClass::DllImport); - } - } - } - g - }; - - if cx.use_dll_storage_attrs && cx.tcx.is_dllimport_foreign_item(def_id) { - // For foreign (native) libs we know the exact storage type to use. - unsafe { - llvm::LLVMSetDLLStorageClass(g, llvm::DLLStorageClass::DllImport); - } - } - - cx.instances.borrow_mut().insert(instance, g); - g -} - fn check_and_apply_linkage( cx: &CodegenCx<'ll, 'tcx>, attrs: &CodegenFnAttrs, @@ -260,7 +133,7 @@ fn check_and_apply_linkage( }; unsafe { // Declare a symbol `foo` with the desired linkage. - let g1 = declare::declare_global(cx, &sym, llty2); + let g1 = cx.declare_global(&sym, llty2); llvm::LLVMRustSetLinkage(g1, base::linkage_to_llvm(linkage)); // Declare an internal global `extern_with_linkage_foo` which @@ -271,7 +144,7 @@ fn check_and_apply_linkage( // zero. let mut real_name = "_rust_extern_with_linkage_".to_string(); real_name.push_str(&sym); - let g2 = declare::define_global(cx, &real_name, llty).unwrap_or_else(||{ + let g2 = cx.define_global(&real_name, llty).unwrap_or_else(||{ if let Some(span) = span { cx.sess().span_fatal( span, @@ -288,150 +161,338 @@ fn check_and_apply_linkage( } else { // Generate an external declaration. 
// FIXME(nagisa): investigate whether it can be changed into define_global - declare::declare_global(cx, &sym, llty) + cx.declare_global(&sym, llty) } } -pub fn codegen_static<'a, 'tcx>( - cx: &CodegenCx<'a, 'tcx>, - def_id: DefId, - is_mutable: bool, -) { +pub fn ptrcast(val: &'ll Value, ty: &'ll Type) -> &'ll Value { unsafe { - let attrs = cx.tcx.codegen_fn_attrs(def_id); + llvm::LLVMConstPointerCast(val, ty) + } +} - let (v, alloc) = match ::mir::codegen_static_initializer(cx, def_id) { - Ok(v) => v, - // Error has already been reported - Err(_) => return, - }; +impl CodegenCx<'ll, 'tcx> { + crate fn const_bitcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value { + unsafe { + llvm::LLVMConstBitCast(val, ty) + } + } - let g = get_static(cx, def_id); + crate fn static_addr_of_mut( + &self, + cv: &'ll Value, + align: Align, + kind: Option<&str>, + ) -> &'ll Value { + unsafe { + let gv = match kind { + Some(kind) if !self.tcx.sess.fewer_names() => { + let name = self.generate_local_symbol_name(kind); + let gv = self.define_global(&name[..], + self.val_ty(cv)).unwrap_or_else(||{ + bug!("symbol `{}` is already defined", name); + }); + llvm::LLVMRustSetLinkage(gv, llvm::Linkage::PrivateLinkage); + gv + }, + _ => self.define_private_global(self.val_ty(cv)), + }; + llvm::LLVMSetInitializer(gv, cv); + set_global_alignment(&self, gv, align); + SetUnnamedAddr(gv, true); + gv + } + } - // boolean SSA values are i1, but they have to be stored in i8 slots, - // otherwise some LLVM optimization passes don't work as expected - let mut val_llty = val_ty(v); - let v = if val_llty == Type::i1(cx) { - val_llty = Type::i8(cx); - llvm::LLVMConstZExt(v, val_llty) - } else { - v - }; + crate fn get_static(&self, def_id: DefId) -> &'ll Value { + let instance = Instance::mono(self.tcx, def_id); + if let Some(&g) = self.instances.borrow().get(&instance) { + return g; + } + + let defined_in_current_codegen_unit = self.codegen_unit + .items() + .contains_key(&MonoItem::Static(def_id)); + assert!(!defined_in_current_codegen_unit, + "consts::get_static() should always hit the cache for \ + statics defined in the same CGU, but did not for `{:?}`", + def_id); + + let ty = instance.ty(self.tcx); + let sym = self.tcx.symbol_name(instance).as_str(); + + debug!("get_static: sym={} instance={:?}", sym, instance); + + let g = if let Some(id) = self.tcx.hir.as_local_node_id(def_id) { + + let llty = self.layout_of(ty).llvm_type(self); + let (g, attrs) = match self.tcx.hir.get(id) { + Node::Item(&hir::Item { + ref attrs, span, node: hir::ItemKind::Static(..), .. + }) => { + if self.get_declared_value(&sym[..]).is_some() { + span_bug!(span, "Conflicting symbol names for static?"); + } + + let g = self.define_global(&sym[..], llty).unwrap(); + + if !self.tcx.is_reachable_non_generic(def_id) { + unsafe { + llvm::LLVMRustSetVisibility(g, llvm::Visibility::Hidden); + } + } + + (g, attrs) + } + + Node::ForeignItem(&hir::ForeignItem { + ref attrs, span, node: hir::ForeignItemKind::Static(..), .. 
+ }) => { + let fn_attrs = self.tcx.codegen_fn_attrs(def_id); + (check_and_apply_linkage(&self, &fn_attrs, ty, sym, Some(span)), attrs) + } + + item => bug!("get_static: expected static, found {:?}", item) + }; + + debug!("get_static: sym={} attrs={:?}", sym, attrs); + + for attr in attrs { + if attr.check_name("thread_local") { + llvm::set_thread_local_mode(g, self.tls_model); + } + } - let instance = Instance::mono(cx.tcx, def_id); - let ty = instance.ty(cx.tcx); - let llty = cx.layout_of(ty).llvm_type(cx); - let g = if val_llty == llty { g } else { - // If we created the global with the wrong type, - // correct the type. - let empty_string = const_cstr!(""); - let name_str_ref = CStr::from_ptr(llvm::LLVMGetValueName(g)); - let name_string = CString::new(name_str_ref.to_bytes()).unwrap(); - llvm::LLVMSetValueName(g, empty_string.as_ptr()); + // FIXME(nagisa): perhaps the map of externs could be offloaded to llvm somehow? + debug!("get_static: sym={} item_attr={:?}", sym, self.tcx.item_attrs(def_id)); - let linkage = llvm::LLVMRustGetLinkage(g); - let visibility = llvm::LLVMRustGetVisibility(g); + let attrs = self.tcx.codegen_fn_attrs(def_id); + let g = check_and_apply_linkage(&self, &attrs, ty, sym, None); - let new_g = llvm::LLVMRustGetOrInsertGlobal( - cx.llmod, name_string.as_ptr(), val_llty); + // Thread-local statics in some other crate need to *always* be linked + // against in a thread-local fashion, so we need to be sure to apply the + // thread-local attribute locally if it was present remotely. If we + // don't do this then linker errors can be generated where the linker + // complains that one object files has a thread local version of the + // symbol and another one doesn't. + if attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) { + llvm::set_thread_local_mode(g, self.tls_model); + } - llvm::LLVMRustSetLinkage(new_g, linkage); - llvm::LLVMRustSetVisibility(new_g, visibility); + let needs_dll_storage_attr = + self.use_dll_storage_attrs && !self.tcx.is_foreign_item(def_id) && + // ThinLTO can't handle this workaround in all cases, so we don't + // emit the attrs. Instead we make them unnecessary by disallowing + // dynamic linking when cross-language LTO is enabled. + !self.tcx.sess.opts.debugging_opts.cross_lang_lto.enabled(); - // To avoid breaking any invariants, we leave around the old - // global for the moment; we'll replace all references to it - // with the new global later. (See base::codegen_backend.) - cx.statics_to_rauw.borrow_mut().push((g, new_g)); - new_g + // If this assertion triggers, there's something wrong with commandline + // argument validation. + debug_assert!(!(self.tcx.sess.opts.debugging_opts.cross_lang_lto.enabled() && + self.tcx.sess.target.target.options.is_like_msvc && + self.tcx.sess.opts.cg.prefer_dynamic)); + + if needs_dll_storage_attr { + // This item is external but not foreign, i.e. it originates from an external Rust + // crate. Since we don't know whether this crate will be linked dynamically or + // statically in the final application, we always mark such symbols as 'dllimport'. + // If final linkage happens to be static, we rely on compiler-emitted __imp_ stubs + // to make things work. + // + // However, in some scenarios we defer emission of statics to downstream + // crates, so there are cases where a static with an upstream DefId + // is actually present in the current crate. We can find out via the + // is_codegened_item query. 
+ if !self.tcx.is_codegened_item(def_id) { + unsafe { + llvm::LLVMSetDLLStorageClass(g, llvm::DLLStorageClass::DllImport); + } + } + } + g }; - set_global_alignment(cx, g, cx.align_of(ty)); - llvm::LLVMSetInitializer(g, v); - // As an optimization, all shared statics which do not have interior - // mutability are placed into read-only memory. - if !is_mutable { - if cx.type_is_freeze(ty) { - llvm::LLVMSetGlobalConstant(g, llvm::True); + if self.use_dll_storage_attrs && self.tcx.is_dllimport_foreign_item(def_id) { + // For foreign (native) libs we know the exact storage type to use. + unsafe { + llvm::LLVMSetDLLStorageClass(g, llvm::DLLStorageClass::DllImport); } } - debuginfo::create_global_var_metadata(cx, def_id, g); + self.instances.borrow_mut().insert(instance, g); + g + } +} - if attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) { - llvm::set_thread_local_mode(g, cx.tls_model); - - // Do not allow LLVM to change the alignment of a TLS on macOS. - // - // By default a global's alignment can be freely increased. - // This allows LLVM to generate more performant instructions - // e.g. using load-aligned into a SIMD register. - // - // However, on macOS 10.10 or below, the dynamic linker does not - // respect any alignment given on the TLS (radar 24221680). - // This will violate the alignment assumption, and causing segfault at runtime. - // - // This bug is very easy to trigger. In `println!` and `panic!`, - // the `LOCAL_STDOUT`/`LOCAL_STDERR` handles are stored in a TLS, - // which the values would be `mem::replace`d on initialization. - // The implementation of `mem::replace` will use SIMD - // whenever the size is 32 bytes or higher. LLVM notices SIMD is used - // and tries to align `LOCAL_STDOUT`/`LOCAL_STDERR` to a 32-byte boundary, - // which macOS's dyld disregarded and causing crashes - // (see issues #51794, #51758, #50867, #48866 and #44056). - // - // To workaround the bug, we trick LLVM into not increasing - // the global's alignment by explicitly assigning a section to it - // (equivalent to automatically generating a `#[link_section]` attribute). - // See the comment in the `GlobalValue::canIncreaseAlignment()` function - // of `lib/IR/Globals.cpp` for why this works. - // - // When the alignment is not increased, the optimized `mem::replace` - // will use load-unaligned instructions instead, and thus avoiding the crash. - // - // We could remove this hack whenever we decide to drop macOS 10.10 support. 
- if cx.tcx.sess.target.target.options.is_like_osx { - let sect_name = if alloc.bytes.iter().all(|b| *b == 0) { - CStr::from_bytes_with_nul_unchecked(b"__DATA,__thread_bss\0") - } else { - CStr::from_bytes_with_nul_unchecked(b"__DATA,__thread_data\0") - }; - llvm::LLVMSetSection(g, sect_name.as_ptr()); +impl StaticMethods for CodegenCx<'ll, 'tcx> { + fn static_addr_of( + &self, + cv: &'ll Value, + align: Align, + kind: Option<&str>, + ) -> &'ll Value { + if let Some(&gv) = self.const_globals.borrow().get(&cv) { + unsafe { + // Upgrade the alignment in cases where the same constant is used with different + // alignment requirements + let llalign = align.bytes() as u32; + if llalign > llvm::LLVMGetAlignment(gv) { + llvm::LLVMSetAlignment(gv, llalign); + } } + return gv; } + let gv = self.static_addr_of_mut(cv, align, kind); + unsafe { + llvm::LLVMSetGlobalConstant(gv, True); + } + self.const_globals.borrow_mut().insert(cv, gv); + gv + } + fn codegen_static( + &self, + def_id: DefId, + is_mutable: bool, + ) { + unsafe { + let attrs = self.tcx.codegen_fn_attrs(def_id); - // Wasm statics with custom link sections get special treatment as they - // go into custom sections of the wasm executable. - if cx.tcx.sess.opts.target_triple.triple().starts_with("wasm32") { - if let Some(section) = attrs.link_section { - let section = llvm::LLVMMDStringInContext( - cx.llcx, - section.as_str().as_ptr() as *const _, - section.as_str().len() as c_uint, - ); - let alloc = llvm::LLVMMDStringInContext( - cx.llcx, - alloc.bytes.as_ptr() as *const _, - alloc.bytes.len() as c_uint, - ); - let data = [section, alloc]; - let meta = llvm::LLVMMDNodeInContext(cx.llcx, data.as_ptr(), 2); - llvm::LLVMAddNamedMetadataOperand( - cx.llmod, - "wasm.custom_sections\0".as_ptr() as *const _, - meta, - ); + let (v, alloc) = match codegen_static_initializer(&self, def_id) { + Ok(v) => v, + // Error has already been reported + Err(_) => return, + }; + + let g = self.get_static(def_id); + + // boolean SSA values are i1, but they have to be stored in i8 slots, + // otherwise some LLVM optimization passes don't work as expected + let mut val_llty = self.val_ty(v); + let v = if val_llty == self.type_i1() { + val_llty = self.type_i8(); + llvm::LLVMConstZExt(v, val_llty) + } else { + v + }; + + let instance = Instance::mono(self.tcx, def_id); + let ty = instance.ty(self.tcx); + let llty = self.layout_of(ty).llvm_type(self); + let g = if val_llty == llty { + g + } else { + // If we created the global with the wrong type, + // correct the type. + let empty_string = const_cstr!(""); + let name_str_ref = CStr::from_ptr(llvm::LLVMGetValueName(g)); + let name_string = CString::new(name_str_ref.to_bytes()).unwrap(); + llvm::LLVMSetValueName(g, empty_string.as_ptr()); + + let linkage = llvm::LLVMRustGetLinkage(g); + let visibility = llvm::LLVMRustGetVisibility(g); + + let new_g = llvm::LLVMRustGetOrInsertGlobal( + self.llmod, name_string.as_ptr(), val_llty); + + llvm::LLVMRustSetLinkage(new_g, linkage); + llvm::LLVMRustSetVisibility(new_g, visibility); + + // To avoid breaking any invariants, we leave around the old + // global for the moment; we'll replace all references to it + // with the new global later. (See base::codegen_backend.) + self.statics_to_rauw.borrow_mut().push((g, new_g)); + new_g + }; + set_global_alignment(&self, g, self.align_of(ty)); + llvm::LLVMSetInitializer(g, v); + + // As an optimization, all shared statics which do not have interior + // mutability are placed into read-only memory. 
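`static_addr_of` in this hunk de-duplicates constants through the `const_globals` cache and, when the same constant is requested again with a stricter alignment, bumps the cached global's alignment rather than emitting a second copy. A minimal sketch of that cache-and-upgrade idea, with a plain map standing in for LLVM globals:

```
use std::collections::HashMap;

// Stand-in cache: constant bytes -> (symbol id, alignment in bytes), in place of
// the real `const_globals` map from constant Value to LLVM global.
struct ConstCache {
    globals: HashMap<Vec<u8>, (usize, u32)>,
    next_id: usize,
}

impl ConstCache {
    fn static_addr_of(&mut self, cv: Vec<u8>, align: u32) -> usize {
        if let Some(entry) = self.globals.get_mut(&cv) {
            // Same constant requested with a stricter alignment: upgrade in place
            // instead of emitting a second copy.
            if align > entry.1 {
                entry.1 = align;
            }
            return entry.0;
        }
        let id = self.next_id;
        self.next_id += 1;
        self.globals.insert(cv, (id, align));
        id
    }
}

fn main() {
    let mut cache = ConstCache { globals: HashMap::new(), next_id: 0 };
    let first = cache.static_addr_of(b"abc".to_vec(), 1);
    let second = cache.static_addr_of(b"abc".to_vec(), 8);
    assert_eq!(first, second);                        // de-duplicated
    assert_eq!(cache.globals[&b"abc".to_vec()].1, 8); // alignment upgraded
}
```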
+ if !is_mutable { + if self.type_is_freeze(ty) { + llvm::LLVMSetGlobalConstant(g, llvm::True); + } } - } else { - base::set_link_section(g, &attrs); - } - if attrs.flags.contains(CodegenFnAttrFlags::USED) { - // This static will be stored in the llvm.used variable which is an array of i8* - let cast = llvm::LLVMConstPointerCast(g, Type::i8p(cx)); - cx.used_statics.borrow_mut().push(cast); + debuginfo::create_global_var_metadata(&self, def_id, g); + + if attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) { + llvm::set_thread_local_mode(g, self.tls_model); + + // Do not allow LLVM to change the alignment of a TLS on macOS. + // + // By default a global's alignment can be freely increased. + // This allows LLVM to generate more performant instructions + // e.g. using load-aligned into a SIMD register. + // + // However, on macOS 10.10 or below, the dynamic linker does not + // respect any alignment given on the TLS (radar 24221680). + // This will violate the alignment assumption, and causing segfault at runtime. + // + // This bug is very easy to trigger. In `println!` and `panic!`, + // the `LOCAL_STDOUT`/`LOCAL_STDERR` handles are stored in a TLS, + // which the values would be `mem::replace`d on initialization. + // The implementation of `mem::replace` will use SIMD + // whenever the size is 32 bytes or higher. LLVM notices SIMD is used + // and tries to align `LOCAL_STDOUT`/`LOCAL_STDERR` to a 32-byte boundary, + // which macOS's dyld disregarded and causing crashes + // (see issues #51794, #51758, #50867, #48866 and #44056). + // + // To workaround the bug, we trick LLVM into not increasing + // the global's alignment by explicitly assigning a section to it + // (equivalent to automatically generating a `#[link_section]` attribute). + // See the comment in the `GlobalValue::canIncreaseAlignment()` function + // of `lib/IR/Globals.cpp` for why this works. + // + // When the alignment is not increased, the optimized `mem::replace` + // will use load-unaligned instructions instead, and thus avoiding the crash. + // + // We could remove this hack whenever we decide to drop macOS 10.10 support. + if self.tcx.sess.target.target.options.is_like_osx { + let sect_name = if alloc.bytes.iter().all(|b| *b == 0) { + CStr::from_bytes_with_nul_unchecked(b"__DATA,__thread_bss\0") + } else { + CStr::from_bytes_with_nul_unchecked(b"__DATA,__thread_data\0") + }; + llvm::LLVMSetSection(g, sect_name.as_ptr()); + } + } + + + // Wasm statics with custom link sections get special treatment as they + // go into custom sections of the wasm executable. 
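The long comment above boils down to one decision: give macOS thread-locals an explicit Mach-O section so LLVM will not raise their alignment, picking `__thread_bss` for all-zero initializers and `__thread_data` otherwise. The selection itself is just:

```
// Mirrors the section choice in `codegen_static`; the real code appends a
// trailing NUL because the name is handed to LLVM as a C string.
fn macho_tls_section(init_bytes: &[u8]) -> &'static str {
    if init_bytes.iter().all(|&b| b == 0) {
        "__DATA,__thread_bss"
    } else {
        "__DATA,__thread_data"
    }
}

fn main() {
    assert_eq!(macho_tls_section(&[0, 0, 0]), "__DATA,__thread_bss");
    assert_eq!(macho_tls_section(&[1, 0, 0]), "__DATA,__thread_data");
}
```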
+ if self.tcx.sess.opts.target_triple.triple().starts_with("wasm32") { + if let Some(section) = attrs.link_section { + let section = llvm::LLVMMDStringInContext( + self.llcx, + section.as_str().as_ptr() as *const _, + section.as_str().len() as c_uint, + ); + let alloc = llvm::LLVMMDStringInContext( + self.llcx, + alloc.bytes.as_ptr() as *const _, + alloc.bytes.len() as c_uint, + ); + let data = [section, alloc]; + let meta = llvm::LLVMMDNodeInContext(self.llcx, data.as_ptr(), 2); + llvm::LLVMAddNamedMetadataOperand( + self.llmod, + "wasm.custom_sections\0".as_ptr() as *const _, + meta, + ); + } + } else { + base::set_link_section(g, &attrs); + } + + if attrs.flags.contains(CodegenFnAttrFlags::USED) { + // This static will be stored in the llvm.used variable which is an array of i8* + let cast = llvm::LLVMConstPointerCast(g, self.type_i8p()); + self.used_statics.borrow_mut().push(cast); + } } } } diff --git a/src/librustc_codegen_llvm/context.rs b/src/librustc_codegen_llvm/context.rs index 826df82193..564e424cf6 100644 --- a/src/librustc_codegen_llvm/context.rs +++ b/src/librustc_codegen_llvm/context.rs @@ -9,30 +9,32 @@ // except according to those terms. use attributes; -use common; use llvm; +use llvm_util; use rustc::dep_graph::DepGraphSafe; use rustc::hir; use debuginfo; -use callee; -use base; -use declare; use monomorphize::Instance; use value::Value; use monomorphize::partitioning::CodegenUnit; use type_::Type; use type_of::PointeeInfo; +use rustc_codegen_ssa::traits::*; +use libc::c_uint; use rustc_data_structures::base_n; use rustc_data_structures::small_c_str::SmallCStr; use rustc::mir::mono::Stats; use rustc::session::config::{self, DebugInfo}; use rustc::session::Session; -use rustc::ty::layout::{LayoutError, LayoutOf, Size, TyLayout}; +use rustc::ty::layout::{LayoutError, LayoutOf, Size, TyLayout, VariantIdx}; use rustc::ty::{self, Ty, TyCtxt}; use rustc::util::nodemap::FxHashMap; use rustc_target::spec::{HasTargetSpec, Target}; +use rustc_codegen_ssa::callee::resolve_and_get_fn; +use rustc_codegen_ssa::base::wants_msvc_seh; +use callee::get_fn; use std::ffi::CStr; use std::cell::{Cell, RefCell}; @@ -45,24 +47,23 @@ use abi::Abi; /// There is one `CodegenCx` per compilation unit. Each one has its own LLVM /// `llvm::Context` so that several compilation units may be optimized in parallel. /// All other LLVM data structures in the `CodegenCx` are tied to that `llvm::Context`. -pub struct CodegenCx<'a, 'tcx: 'a> { - pub tcx: TyCtxt<'a, 'tcx, 'tcx>, +pub struct CodegenCx<'ll, 'tcx: 'll> { + pub tcx: TyCtxt<'ll, 'tcx, 'tcx>, pub check_overflow: bool, pub use_dll_storage_attrs: bool, pub tls_model: llvm::ThreadLocalMode, - pub llmod: &'a llvm::Module, - pub llcx: &'a llvm::Context, + pub llmod: &'ll llvm::Module, + pub llcx: &'ll llvm::Context, pub stats: RefCell, pub codegen_unit: Arc>, /// Cache instances of monomorphic and polymorphic items - pub instances: RefCell, &'a Value>>, + pub instances: RefCell, &'ll Value>>, /// Cache generated vtables - pub vtables: RefCell, ty::PolyExistentialTraitRef<'tcx>), - &'a Value>>, + pub vtables: RefCell, ty::PolyExistentialTraitRef<'tcx>), &'ll Value>>, /// Cache of constant strings, - pub const_cstr_cache: RefCell>, + pub const_cstr_cache: RefCell>, /// Reverse-direction for const ptrs cast from globals. 
/// Key is a Value holding a *T, @@ -72,40 +73,39 @@ pub struct CodegenCx<'a, 'tcx: 'a> { /// when we ptrcast, and we have to ptrcast during codegen /// of a [T] const because we form a slice, a (*T,usize) pair, not /// a pointer to an LLVM array type. Similar for trait objects. - pub const_unsized: RefCell>, + pub const_unsized: RefCell>, /// Cache of emitted const globals (value -> global) - pub const_globals: RefCell>, + pub const_globals: RefCell>, /// List of globals for static variables which need to be passed to the /// LLVM function ReplaceAllUsesWith (RAUW) when codegen is complete. /// (We have to make sure we don't invalidate any Values referring /// to constants.) - pub statics_to_rauw: RefCell>, + pub statics_to_rauw: RefCell>, /// Statics that will be placed in the llvm.used variable /// See http://llvm.org/docs/LangRef.html#the-llvm-used-global-variable for details - pub used_statics: RefCell>, + pub used_statics: RefCell>, - pub lltypes: RefCell, Option), &'a Type>>, - pub scalar_lltypes: RefCell, &'a Type>>, + pub lltypes: RefCell, Option), &'ll Type>>, + pub scalar_lltypes: RefCell, &'ll Type>>, pub pointee_infos: RefCell, Size), Option>>, - pub isize_ty: &'a Type, + pub isize_ty: &'ll Type, - pub dbg_cx: Option>, + pub dbg_cx: Option>, - eh_personality: Cell>, - eh_unwind_resume: Cell>, - pub rust_try_fn: Cell>, + eh_personality: Cell>, + eh_unwind_resume: Cell>, + pub rust_try_fn: Cell>, - intrinsics: RefCell>, + intrinsics: RefCell>, /// A counter that is used for generating local symbol names local_gen_sym_counter: Cell, } -impl<'a, 'tcx> DepGraphSafe for CodegenCx<'a, 'tcx> { -} +impl<'ll, 'tcx> DepGraphSafe for CodegenCx<'ll, 'tcx> {} pub fn get_reloc_model(sess: &Session) -> llvm::RelocMode { let reloc_model_arg = match sess.opts.cg.relocation_model { @@ -218,11 +218,11 @@ pub unsafe fn create_module( llmod } -impl<'a, 'tcx> CodegenCx<'a, 'tcx> { - crate fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>, +impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { + crate fn new(tcx: TyCtxt<'ll, 'tcx, 'tcx>, codegen_unit: Arc>, - llvm_module: &'a ::ModuleLlvm) - -> CodegenCx<'a, 'tcx> { + llvm_module: &'ll ::ModuleLlvm) + -> Self { // An interesting part of Windows which MSVC forces our hand on (and // apparently MinGW didn't) is the usage of `dllimport` and `dllexport` // attributes in LLVM IR as well as native dependencies (in C these @@ -314,36 +314,32 @@ impl<'a, 'tcx> CodegenCx<'a, 'tcx> { local_gen_sym_counter: Cell::new(0), } } + + crate fn statics_to_rauw(&self) -> &RefCell> { + &self.statics_to_rauw + } } -impl<'b, 'tcx> CodegenCx<'b, 'tcx> { - pub fn sess<'a>(&'a self) -> &'a Session { - &self.tcx.sess +impl MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> { + fn vtables(&self) -> &RefCell, + ty::PolyExistentialTraitRef<'tcx>), &'ll Value>> + { + &self.vtables } - pub fn get_intrinsic(&self, key: &str) -> &'b Value { - if let Some(v) = self.intrinsics.borrow().get(key).cloned() { - return v; - } - - declare_intrinsic(self, key).unwrap_or_else(|| bug!("unknown intrinsic '{}'", key)) + fn instances(&self) -> &RefCell, &'ll Value>> { + &self.instances } - /// Generate a new symbol name with the given prefix. This symbol name must - /// only be used for definitions with `internal` or `private` linkage. - pub fn generate_local_symbol_name(&self, prefix: &str) -> String { - let idx = self.local_gen_sym_counter.get(); - self.local_gen_sym_counter.set(idx + 1); - // Include a '.' 
character, so there can be no accidental conflicts with - // user defined names - let mut name = String::with_capacity(prefix.len() + 6); - name.push_str(prefix); - name.push_str("."); - base_n::push_str(idx as u128, base_n::ALPHANUMERIC_ONLY, &mut name); - name + fn get_fn(&self, instance: Instance<'tcx>) -> &'ll Value { + get_fn(self, instance) } - pub fn eh_personality(&self) -> &'b Value { + fn get_param(&self, llfn: &'ll Value, index: c_uint) -> &'ll Value { + llvm::get_param(llfn, index) + } + + fn eh_personality(&self) -> &'ll Value { // The exception handling personality function. // // If our compilation unit has the `eh_personality` lang item somewhere @@ -369,17 +365,17 @@ impl<'b, 'tcx> CodegenCx<'b, 'tcx> { } let tcx = self.tcx; let llfn = match tcx.lang_items().eh_personality() { - Some(def_id) if !base::wants_msvc_seh(self.sess()) => { - callee::resolve_and_get_fn(self, def_id, tcx.intern_substs(&[])) + Some(def_id) if !wants_msvc_seh(self.sess()) => { + resolve_and_get_fn(self, def_id, tcx.intern_substs(&[])) } _ => { - let name = if base::wants_msvc_seh(self.sess()) { + let name = if wants_msvc_seh(self.sess()) { "__CxxFrameHandler3" } else { "rust_eh_personality" }; - let fty = Type::variadic_func(&[], Type::i32(self)); - declare::declare_cfn(self, name, fty) + let fty = self.type_variadic_func(&[], self.type_i32()); + self.declare_cfn(name, fty) } }; attributes::apply_target_cpu_attr(self, llfn); @@ -389,7 +385,7 @@ impl<'b, 'tcx> CodegenCx<'b, 'tcx> { // Returns a Value of the "eh_unwind_resume" lang item if one is defined, // otherwise declares it as an external function. - pub fn eh_unwind_resume(&self) -> &'b Value { + fn eh_unwind_resume(&self) -> &'ll Value { use attributes; let unwresume = &self.eh_unwind_resume; if let Some(llfn) = unwresume.get() { @@ -399,76 +395,446 @@ impl<'b, 'tcx> CodegenCx<'b, 'tcx> { let tcx = self.tcx; assert!(self.sess().target.target.options.custom_unwind_resume); if let Some(def_id) = tcx.lang_items().eh_unwind_resume() { - let llfn = callee::resolve_and_get_fn(self, def_id, tcx.intern_substs(&[])); + let llfn = resolve_and_get_fn(self, def_id, tcx.intern_substs(&[])); unwresume.set(Some(llfn)); return llfn; } - let ty = tcx.mk_fn_ptr(ty::Binder::bind(tcx.mk_fn_sig( + let sig = ty::Binder::bind(tcx.mk_fn_sig( iter::once(tcx.mk_mut_ptr(tcx.types.u8)), tcx.types.never, false, hir::Unsafety::Unsafe, Abi::C - ))); + )); - let llfn = declare::declare_fn(self, "rust_eh_unwind_resume", ty); + let llfn = self.declare_fn("rust_eh_unwind_resume", sig); attributes::unwind(llfn, true); attributes::apply_target_cpu_attr(self, llfn); unwresume.set(Some(llfn)); llfn } - pub fn type_needs_drop(&self, ty: Ty<'tcx>) -> bool { - common::type_needs_drop(self.tcx, ty) + fn sess(&self) -> &Session { + &self.tcx.sess } - pub fn type_is_sized(&self, ty: Ty<'tcx>) -> bool { - common::type_is_sized(self.tcx, ty) + fn check_overflow(&self) -> bool { + self.check_overflow } - pub fn type_is_freeze(&self, ty: Ty<'tcx>) -> bool { - common::type_is_freeze(self.tcx, ty) + fn stats(&self) -> &RefCell { + &self.stats } - pub fn type_has_metadata(&self, ty: Ty<'tcx>) -> bool { - use syntax_pos::DUMMY_SP; - if ty.is_sized(self.tcx.at(DUMMY_SP), ty::ParamEnv::reveal_all()) { - return false; - } + fn consume_stats(self) -> RefCell { + self.stats + } - let tail = self.tcx.struct_tail(ty); - match tail.sty { - ty::Foreign(..) => false, - ty::Str | ty::Slice(..) | ty::Dynamic(..) 
=> true, - _ => bug!("unexpected unsized tail: {:?}", tail.sty), + fn codegen_unit(&self) -> &Arc> { + &self.codegen_unit + } + + fn used_statics(&self) -> &RefCell> { + &self.used_statics + } + + fn set_frame_pointer_elimination(&self, llfn: &'ll Value) { + attributes::set_frame_pointer_elimination(self, llfn) + } + + fn apply_target_cpu_attr(&self, llfn: &'ll Value) { + attributes::apply_target_cpu_attr(self, llfn) + } + + fn closure_env_needs_indirect_debuginfo(&self) -> bool { + llvm_util::get_major_version() < 6 + } + + fn create_used_variable(&self) { + let name = const_cstr!("llvm.used"); + let section = const_cstr!("llvm.metadata"); + let array = self.const_array( + &self.type_ptr_to(self.type_i8()), + &*self.used_statics.borrow() + ); + + unsafe { + let g = llvm::LLVMAddGlobal(self.llmod, + self.val_ty(array), + name.as_ptr()); + llvm::LLVMSetInitializer(g, array); + llvm::LLVMRustSetLinkage(g, llvm::Linkage::AppendingLinkage); + llvm::LLVMSetSection(g, section.as_ptr()); } } } -impl ty::layout::HasDataLayout for &'a CodegenCx<'ll, 'tcx> { +impl CodegenCx<'b, 'tcx> { + crate fn get_intrinsic(&self, key: &str) -> &'b Value { + if let Some(v) = self.intrinsics.borrow().get(key).cloned() { + return v; + } + + self.declare_intrinsic(key).unwrap_or_else(|| bug!("unknown intrinsic '{}'", key)) + } + + fn declare_intrinsic( + &self, + key: &str + ) -> Option<&'b Value> { + macro_rules! ifn { + ($name:expr, fn() -> $ret:expr) => ( + if key == $name { + let f = self.declare_cfn($name, self.type_func(&[], $ret)); + llvm::SetUnnamedAddr(f, false); + self.intrinsics.borrow_mut().insert($name, f.clone()); + return Some(f); + } + ); + ($name:expr, fn(...) -> $ret:expr) => ( + if key == $name { + let f = self.declare_cfn($name, self.type_variadic_func(&[], $ret)); + llvm::SetUnnamedAddr(f, false); + self.intrinsics.borrow_mut().insert($name, f.clone()); + return Some(f); + } + ); + ($name:expr, fn($($arg:expr),*) -> $ret:expr) => ( + if key == $name { + let f = self.declare_cfn($name, self.type_func(&[$($arg),*], $ret)); + llvm::SetUnnamedAddr(f, false); + self.intrinsics.borrow_mut().insert($name, f.clone()); + return Some(f); + } + ); + } + macro_rules! 
mk_struct { + ($($field_ty:expr),*) => (self.type_struct( &[$($field_ty),*], false)) + } + + let i8p = self.type_i8p(); + let void = self.type_void(); + let i1 = self.type_i1(); + let t_i8 = self.type_i8(); + let t_i16 = self.type_i16(); + let t_i32 = self.type_i32(); + let t_i64 = self.type_i64(); + let t_i128 = self.type_i128(); + let t_f32 = self.type_f32(); + let t_f64 = self.type_f64(); + + let t_v2f32 = self.type_vector(t_f32, 2); + let t_v4f32 = self.type_vector(t_f32, 4); + let t_v8f32 = self.type_vector(t_f32, 8); + let t_v16f32 = self.type_vector(t_f32, 16); + + let t_v2f64 = self.type_vector(t_f64, 2); + let t_v4f64 = self.type_vector(t_f64, 4); + let t_v8f64 = self.type_vector(t_f64, 8); + + ifn!("llvm.memset.p0i8.i16", fn(i8p, t_i8, t_i16, t_i32, i1) -> void); + ifn!("llvm.memset.p0i8.i32", fn(i8p, t_i8, t_i32, t_i32, i1) -> void); + ifn!("llvm.memset.p0i8.i64", fn(i8p, t_i8, t_i64, t_i32, i1) -> void); + + ifn!("llvm.trap", fn() -> void); + ifn!("llvm.debugtrap", fn() -> void); + ifn!("llvm.frameaddress", fn(t_i32) -> i8p); + + ifn!("llvm.powi.f32", fn(t_f32, t_i32) -> t_f32); + ifn!("llvm.powi.v2f32", fn(t_v2f32, t_i32) -> t_v2f32); + ifn!("llvm.powi.v4f32", fn(t_v4f32, t_i32) -> t_v4f32); + ifn!("llvm.powi.v8f32", fn(t_v8f32, t_i32) -> t_v8f32); + ifn!("llvm.powi.v16f32", fn(t_v16f32, t_i32) -> t_v16f32); + ifn!("llvm.powi.f64", fn(t_f64, t_i32) -> t_f64); + ifn!("llvm.powi.v2f64", fn(t_v2f64, t_i32) -> t_v2f64); + ifn!("llvm.powi.v4f64", fn(t_v4f64, t_i32) -> t_v4f64); + ifn!("llvm.powi.v8f64", fn(t_v8f64, t_i32) -> t_v8f64); + + ifn!("llvm.pow.f32", fn(t_f32, t_f32) -> t_f32); + ifn!("llvm.pow.v2f32", fn(t_v2f32, t_v2f32) -> t_v2f32); + ifn!("llvm.pow.v4f32", fn(t_v4f32, t_v4f32) -> t_v4f32); + ifn!("llvm.pow.v8f32", fn(t_v8f32, t_v8f32) -> t_v8f32); + ifn!("llvm.pow.v16f32", fn(t_v16f32, t_v16f32) -> t_v16f32); + ifn!("llvm.pow.f64", fn(t_f64, t_f64) -> t_f64); + ifn!("llvm.pow.v2f64", fn(t_v2f64, t_v2f64) -> t_v2f64); + ifn!("llvm.pow.v4f64", fn(t_v4f64, t_v4f64) -> t_v4f64); + ifn!("llvm.pow.v8f64", fn(t_v8f64, t_v8f64) -> t_v8f64); + + ifn!("llvm.sqrt.f32", fn(t_f32) -> t_f32); + ifn!("llvm.sqrt.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.sqrt.v4f32", fn(t_v4f32) -> t_v4f32); + ifn!("llvm.sqrt.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.sqrt.v16f32", fn(t_v16f32) -> t_v16f32); + ifn!("llvm.sqrt.f64", fn(t_f64) -> t_f64); + ifn!("llvm.sqrt.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.sqrt.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.sqrt.v8f64", fn(t_v8f64) -> t_v8f64); + + ifn!("llvm.sin.f32", fn(t_f32) -> t_f32); + ifn!("llvm.sin.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.sin.v4f32", fn(t_v4f32) -> t_v4f32); + ifn!("llvm.sin.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.sin.v16f32", fn(t_v16f32) -> t_v16f32); + ifn!("llvm.sin.f64", fn(t_f64) -> t_f64); + ifn!("llvm.sin.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.sin.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.sin.v8f64", fn(t_v8f64) -> t_v8f64); + + ifn!("llvm.cos.f32", fn(t_f32) -> t_f32); + ifn!("llvm.cos.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.cos.v4f32", fn(t_v4f32) -> t_v4f32); + ifn!("llvm.cos.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.cos.v16f32", fn(t_v16f32) -> t_v16f32); + ifn!("llvm.cos.f64", fn(t_f64) -> t_f64); + ifn!("llvm.cos.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.cos.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.cos.v8f64", fn(t_v8f64) -> t_v8f64); + + ifn!("llvm.exp.f32", fn(t_f32) -> t_f32); + ifn!("llvm.exp.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.exp.v4f32", 
fn(t_v4f32) -> t_v4f32); + ifn!("llvm.exp.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.exp.v16f32", fn(t_v16f32) -> t_v16f32); + ifn!("llvm.exp.f64", fn(t_f64) -> t_f64); + ifn!("llvm.exp.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.exp.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.exp.v8f64", fn(t_v8f64) -> t_v8f64); + + ifn!("llvm.exp2.f32", fn(t_f32) -> t_f32); + ifn!("llvm.exp2.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.exp2.v4f32", fn(t_v4f32) -> t_v4f32); + ifn!("llvm.exp2.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.exp2.v16f32", fn(t_v16f32) -> t_v16f32); + ifn!("llvm.exp2.f64", fn(t_f64) -> t_f64); + ifn!("llvm.exp2.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.exp2.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.exp2.v8f64", fn(t_v8f64) -> t_v8f64); + + ifn!("llvm.log.f32", fn(t_f32) -> t_f32); + ifn!("llvm.log.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.log.v4f32", fn(t_v4f32) -> t_v4f32); + ifn!("llvm.log.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.log.v16f32", fn(t_v16f32) -> t_v16f32); + ifn!("llvm.log.f64", fn(t_f64) -> t_f64); + ifn!("llvm.log.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.log.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.log.v8f64", fn(t_v8f64) -> t_v8f64); + + ifn!("llvm.log10.f32", fn(t_f32) -> t_f32); + ifn!("llvm.log10.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.log10.v4f32", fn(t_v4f32) -> t_v4f32); + ifn!("llvm.log10.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.log10.v16f32", fn(t_v16f32) -> t_v16f32); + ifn!("llvm.log10.f64", fn(t_f64) -> t_f64); + ifn!("llvm.log10.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.log10.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.log10.v8f64", fn(t_v8f64) -> t_v8f64); + + ifn!("llvm.log2.f32", fn(t_f32) -> t_f32); + ifn!("llvm.log2.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.log2.v4f32", fn(t_v4f32) -> t_v4f32); + ifn!("llvm.log2.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.log2.v16f32", fn(t_v16f32) -> t_v16f32); + ifn!("llvm.log2.f64", fn(t_f64) -> t_f64); + ifn!("llvm.log2.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.log2.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.log2.v8f64", fn(t_v8f64) -> t_v8f64); + + ifn!("llvm.fma.f32", fn(t_f32, t_f32, t_f32) -> t_f32); + ifn!("llvm.fma.v2f32", fn(t_v2f32, t_v2f32, t_v2f32) -> t_v2f32); + ifn!("llvm.fma.v4f32", fn(t_v4f32, t_v4f32, t_v4f32) -> t_v4f32); + ifn!("llvm.fma.v8f32", fn(t_v8f32, t_v8f32, t_v8f32) -> t_v8f32); + ifn!("llvm.fma.v16f32", fn(t_v16f32, t_v16f32, t_v16f32) -> t_v16f32); + ifn!("llvm.fma.f64", fn(t_f64, t_f64, t_f64) -> t_f64); + ifn!("llvm.fma.v2f64", fn(t_v2f64, t_v2f64, t_v2f64) -> t_v2f64); + ifn!("llvm.fma.v4f64", fn(t_v4f64, t_v4f64, t_v4f64) -> t_v4f64); + ifn!("llvm.fma.v8f64", fn(t_v8f64, t_v8f64, t_v8f64) -> t_v8f64); + + ifn!("llvm.fabs.f32", fn(t_f32) -> t_f32); + ifn!("llvm.fabs.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.fabs.v4f32", fn(t_v4f32) -> t_v4f32); + ifn!("llvm.fabs.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.fabs.v16f32", fn(t_v16f32) -> t_v16f32); + ifn!("llvm.fabs.f64", fn(t_f64) -> t_f64); + ifn!("llvm.fabs.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.fabs.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.fabs.v8f64", fn(t_v8f64) -> t_v8f64); + + ifn!("llvm.floor.f32", fn(t_f32) -> t_f32); + ifn!("llvm.floor.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.floor.v4f32", fn(t_v4f32) -> t_v4f32); + ifn!("llvm.floor.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.floor.v16f32", fn(t_v16f32) -> t_v16f32); + ifn!("llvm.floor.f64", fn(t_f64) -> t_f64); + ifn!("llvm.floor.v2f64", fn(t_v2f64) -> t_v2f64); + 
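// The `ifn!` arms above implement a declare-on-demand scheme: `get_intrinsic`
// consults a per-context cache first and only declares the LLVM intrinsic on a
// miss, remembering the handle for later calls. A standalone sketch of that
// caching shape, with a plain `String` standing in for the `&'ll Value` handle
// (hypothetical names, not the rustc API):
use std::cell::RefCell;
use std::collections::HashMap;

struct IntrinsicCache {
    intrinsics: RefCell<HashMap<&'static str, String>>,
}

impl IntrinsicCache {
    fn get_intrinsic(&self, key: &'static str) -> String {
        if let Some(v) = self.intrinsics.borrow().get(key).cloned() {
            return v; // already declared in this module
        }
        // Stand-in for `declare_cfn` + `SetUnnamedAddr`: declare once, cache.
        let declared = format!("declared {}", key);
        self.intrinsics.borrow_mut().insert(key, declared.clone());
        declared
    }
}

fn main() {
    let cx = IntrinsicCache { intrinsics: RefCell::new(HashMap::new()) };
    let first = cx.get_intrinsic("llvm.sqrt.f32");  // declares
    let second = cx.get_intrinsic("llvm.sqrt.f32"); // cache hit
    assert_eq!(first, second);
}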
ifn!("llvm.floor.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.floor.v8f64", fn(t_v8f64) -> t_v8f64); + + ifn!("llvm.ceil.f32", fn(t_f32) -> t_f32); + ifn!("llvm.ceil.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.ceil.v4f32", fn(t_v4f32) -> t_v4f32); + ifn!("llvm.ceil.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.ceil.v16f32", fn(t_v16f32) -> t_v16f32); + ifn!("llvm.ceil.f64", fn(t_f64) -> t_f64); + ifn!("llvm.ceil.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.ceil.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.ceil.v8f64", fn(t_v8f64) -> t_v8f64); + + ifn!("llvm.trunc.f32", fn(t_f32) -> t_f32); + ifn!("llvm.trunc.f64", fn(t_f64) -> t_f64); + + ifn!("llvm.copysign.f32", fn(t_f32, t_f32) -> t_f32); + ifn!("llvm.copysign.f64", fn(t_f64, t_f64) -> t_f64); + ifn!("llvm.round.f32", fn(t_f32) -> t_f32); + ifn!("llvm.round.f64", fn(t_f64) -> t_f64); + + ifn!("llvm.rint.f32", fn(t_f32) -> t_f32); + ifn!("llvm.rint.f64", fn(t_f64) -> t_f64); + ifn!("llvm.nearbyint.f32", fn(t_f32) -> t_f32); + ifn!("llvm.nearbyint.f64", fn(t_f64) -> t_f64); + + ifn!("llvm.ctpop.i8", fn(t_i8) -> t_i8); + ifn!("llvm.ctpop.i16", fn(t_i16) -> t_i16); + ifn!("llvm.ctpop.i32", fn(t_i32) -> t_i32); + ifn!("llvm.ctpop.i64", fn(t_i64) -> t_i64); + ifn!("llvm.ctpop.i128", fn(t_i128) -> t_i128); + + ifn!("llvm.ctlz.i8", fn(t_i8 , i1) -> t_i8); + ifn!("llvm.ctlz.i16", fn(t_i16, i1) -> t_i16); + ifn!("llvm.ctlz.i32", fn(t_i32, i1) -> t_i32); + ifn!("llvm.ctlz.i64", fn(t_i64, i1) -> t_i64); + ifn!("llvm.ctlz.i128", fn(t_i128, i1) -> t_i128); + + ifn!("llvm.cttz.i8", fn(t_i8 , i1) -> t_i8); + ifn!("llvm.cttz.i16", fn(t_i16, i1) -> t_i16); + ifn!("llvm.cttz.i32", fn(t_i32, i1) -> t_i32); + ifn!("llvm.cttz.i64", fn(t_i64, i1) -> t_i64); + ifn!("llvm.cttz.i128", fn(t_i128, i1) -> t_i128); + + ifn!("llvm.bswap.i16", fn(t_i16) -> t_i16); + ifn!("llvm.bswap.i32", fn(t_i32) -> t_i32); + ifn!("llvm.bswap.i64", fn(t_i64) -> t_i64); + ifn!("llvm.bswap.i128", fn(t_i128) -> t_i128); + + ifn!("llvm.bitreverse.i8", fn(t_i8) -> t_i8); + ifn!("llvm.bitreverse.i16", fn(t_i16) -> t_i16); + ifn!("llvm.bitreverse.i32", fn(t_i32) -> t_i32); + ifn!("llvm.bitreverse.i64", fn(t_i64) -> t_i64); + ifn!("llvm.bitreverse.i128", fn(t_i128) -> t_i128); + + ifn!("llvm.fshl.i8", fn(t_i8, t_i8, t_i8) -> t_i8); + ifn!("llvm.fshl.i16", fn(t_i16, t_i16, t_i16) -> t_i16); + ifn!("llvm.fshl.i32", fn(t_i32, t_i32, t_i32) -> t_i32); + ifn!("llvm.fshl.i64", fn(t_i64, t_i64, t_i64) -> t_i64); + ifn!("llvm.fshl.i128", fn(t_i128, t_i128, t_i128) -> t_i128); + + ifn!("llvm.fshr.i8", fn(t_i8, t_i8, t_i8) -> t_i8); + ifn!("llvm.fshr.i16", fn(t_i16, t_i16, t_i16) -> t_i16); + ifn!("llvm.fshr.i32", fn(t_i32, t_i32, t_i32) -> t_i32); + ifn!("llvm.fshr.i64", fn(t_i64, t_i64, t_i64) -> t_i64); + ifn!("llvm.fshr.i128", fn(t_i128, t_i128, t_i128) -> t_i128); + + ifn!("llvm.sadd.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); + ifn!("llvm.sadd.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); + ifn!("llvm.sadd.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); + ifn!("llvm.sadd.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); + ifn!("llvm.sadd.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1}); + + ifn!("llvm.uadd.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); + ifn!("llvm.uadd.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); + ifn!("llvm.uadd.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); + ifn!("llvm.uadd.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); + 
ifn!("llvm.uadd.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1}); + + ifn!("llvm.ssub.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); + ifn!("llvm.ssub.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); + ifn!("llvm.ssub.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); + ifn!("llvm.ssub.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); + ifn!("llvm.ssub.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1}); + + ifn!("llvm.usub.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); + ifn!("llvm.usub.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); + ifn!("llvm.usub.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); + ifn!("llvm.usub.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); + ifn!("llvm.usub.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1}); + + ifn!("llvm.smul.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); + ifn!("llvm.smul.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); + ifn!("llvm.smul.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); + ifn!("llvm.smul.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); + ifn!("llvm.smul.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1}); + + ifn!("llvm.umul.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); + ifn!("llvm.umul.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); + ifn!("llvm.umul.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); + ifn!("llvm.umul.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); + ifn!("llvm.umul.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1}); + + ifn!("llvm.lifetime.start", fn(t_i64,i8p) -> void); + ifn!("llvm.lifetime.end", fn(t_i64, i8p) -> void); + + ifn!("llvm.expect.i1", fn(i1, i1) -> i1); + ifn!("llvm.eh.typeid.for", fn(i8p) -> t_i32); + ifn!("llvm.localescape", fn(...) -> void); + ifn!("llvm.localrecover", fn(i8p, i8p, t_i32) -> i8p); + ifn!("llvm.x86.seh.recoverfp", fn(i8p, i8p) -> i8p); + + ifn!("llvm.assume", fn(i1) -> void); + ifn!("llvm.prefetch", fn(i8p, t_i32, t_i32, t_i32) -> void); + + // variadic intrinsics + ifn!("llvm.va_start", fn(i8p) -> void); + ifn!("llvm.va_end", fn(i8p) -> void); + ifn!("llvm.va_copy", fn(i8p, i8p) -> void); + + if self.sess().opts.debuginfo != DebugInfo::None { + ifn!("llvm.dbg.declare", fn(self.type_metadata(), self.type_metadata()) -> void); + ifn!("llvm.dbg.value", fn(self.type_metadata(), t_i64, self.type_metadata()) -> void); + } + return None; + } +} + +impl<'b, 'tcx> CodegenCx<'b, 'tcx> { + /// Generate a new symbol name with the given prefix. This symbol name must + /// only be used for definitions with `internal` or `private` linkage. + pub fn generate_local_symbol_name(&self, prefix: &str) -> String { + let idx = self.local_gen_sym_counter.get(); + self.local_gen_sym_counter.set(idx + 1); + // Include a '.' 
character, so there can be no accidental conflicts with + // user defined names + let mut name = String::with_capacity(prefix.len() + 6); + name.push_str(prefix); + name.push_str("."); + base_n::push_str(idx as u128, base_n::ALPHANUMERIC_ONLY, &mut name); + name + } +} + +impl ty::layout::HasDataLayout for CodegenCx<'ll, 'tcx> { fn data_layout(&self) -> &ty::layout::TargetDataLayout { &self.tcx.data_layout } } -impl HasTargetSpec for &'a CodegenCx<'ll, 'tcx> { +impl HasTargetSpec for CodegenCx<'ll, 'tcx> { fn target_spec(&self) -> &Target { &self.tcx.sess.target.target } } -impl ty::layout::HasTyCtxt<'tcx> for &'a CodegenCx<'ll, 'tcx> { - fn tcx<'b>(&'b self) -> TyCtxt<'b, 'tcx, 'tcx> { +impl ty::layout::HasTyCtxt<'tcx> for CodegenCx<'ll, 'tcx> { + fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx> { self.tcx } } -impl LayoutOf for &'a CodegenCx<'ll, 'tcx> { +impl LayoutOf for CodegenCx<'ll, 'tcx> { type Ty = Ty<'tcx>; type TyLayout = TyLayout<'tcx>; - fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout { + fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout { self.tcx.layout_of(ty::ParamEnv::reveal_all().and(ty)) .unwrap_or_else(|e| if let LayoutError::SizeOverflow(_) = e { self.sess().fatal(&e.to_string()) @@ -477,307 +843,3 @@ impl LayoutOf for &'a CodegenCx<'ll, 'tcx> { }) } } - -/// Declare any llvm intrinsics that you might need -fn declare_intrinsic(cx: &CodegenCx<'ll, '_>, key: &str) -> Option<&'ll Value> { - macro_rules! ifn { - ($name:expr, fn() -> $ret:expr) => ( - if key == $name { - let f = declare::declare_cfn(cx, $name, Type::func(&[], $ret)); - llvm::SetUnnamedAddr(f, false); - cx.intrinsics.borrow_mut().insert($name, f.clone()); - return Some(f); - } - ); - ($name:expr, fn(...) -> $ret:expr) => ( - if key == $name { - let f = declare::declare_cfn(cx, $name, Type::variadic_func(&[], $ret)); - llvm::SetUnnamedAddr(f, false); - cx.intrinsics.borrow_mut().insert($name, f.clone()); - return Some(f); - } - ); - ($name:expr, fn($($arg:expr),*) -> $ret:expr) => ( - if key == $name { - let f = declare::declare_cfn(cx, $name, Type::func(&[$($arg),*], $ret)); - llvm::SetUnnamedAddr(f, false); - cx.intrinsics.borrow_mut().insert($name, f.clone()); - return Some(f); - } - ); - } - macro_rules! 
mk_struct { - ($($field_ty:expr),*) => (Type::struct_(cx, &[$($field_ty),*], false)) - } - - let i8p = Type::i8p(cx); - let void = Type::void(cx); - let i1 = Type::i1(cx); - let t_i8 = Type::i8(cx); - let t_i16 = Type::i16(cx); - let t_i32 = Type::i32(cx); - let t_i64 = Type::i64(cx); - let t_i128 = Type::i128(cx); - let t_f32 = Type::f32(cx); - let t_f64 = Type::f64(cx); - - let t_v2f32 = Type::vector(t_f32, 2); - let t_v4f32 = Type::vector(t_f32, 4); - let t_v8f32 = Type::vector(t_f32, 8); - let t_v16f32 = Type::vector(t_f32, 16); - - let t_v2f64 = Type::vector(t_f64, 2); - let t_v4f64 = Type::vector(t_f64, 4); - let t_v8f64 = Type::vector(t_f64, 8); - - ifn!("llvm.memcpy.p0i8.p0i8.i16", fn(i8p, i8p, t_i16, t_i32, i1) -> void); - ifn!("llvm.memcpy.p0i8.p0i8.i32", fn(i8p, i8p, t_i32, t_i32, i1) -> void); - ifn!("llvm.memcpy.p0i8.p0i8.i64", fn(i8p, i8p, t_i64, t_i32, i1) -> void); - ifn!("llvm.memmove.p0i8.p0i8.i16", fn(i8p, i8p, t_i16, t_i32, i1) -> void); - ifn!("llvm.memmove.p0i8.p0i8.i32", fn(i8p, i8p, t_i32, t_i32, i1) -> void); - ifn!("llvm.memmove.p0i8.p0i8.i64", fn(i8p, i8p, t_i64, t_i32, i1) -> void); - ifn!("llvm.memset.p0i8.i16", fn(i8p, t_i8, t_i16, t_i32, i1) -> void); - ifn!("llvm.memset.p0i8.i32", fn(i8p, t_i8, t_i32, t_i32, i1) -> void); - ifn!("llvm.memset.p0i8.i64", fn(i8p, t_i8, t_i64, t_i32, i1) -> void); - - ifn!("llvm.trap", fn() -> void); - ifn!("llvm.debugtrap", fn() -> void); - ifn!("llvm.frameaddress", fn(t_i32) -> i8p); - - ifn!("llvm.powi.f32", fn(t_f32, t_i32) -> t_f32); - ifn!("llvm.powi.v2f32", fn(t_v2f32, t_i32) -> t_v2f32); - ifn!("llvm.powi.v4f32", fn(t_v4f32, t_i32) -> t_v4f32); - ifn!("llvm.powi.v8f32", fn(t_v8f32, t_i32) -> t_v8f32); - ifn!("llvm.powi.v16f32", fn(t_v16f32, t_i32) -> t_v16f32); - ifn!("llvm.powi.f64", fn(t_f64, t_i32) -> t_f64); - ifn!("llvm.powi.v2f64", fn(t_v2f64, t_i32) -> t_v2f64); - ifn!("llvm.powi.v4f64", fn(t_v4f64, t_i32) -> t_v4f64); - ifn!("llvm.powi.v8f64", fn(t_v8f64, t_i32) -> t_v8f64); - - ifn!("llvm.pow.f32", fn(t_f32, t_f32) -> t_f32); - ifn!("llvm.pow.v2f32", fn(t_v2f32, t_v2f32) -> t_v2f32); - ifn!("llvm.pow.v4f32", fn(t_v4f32, t_v4f32) -> t_v4f32); - ifn!("llvm.pow.v8f32", fn(t_v8f32, t_v8f32) -> t_v8f32); - ifn!("llvm.pow.v16f32", fn(t_v16f32, t_v16f32) -> t_v16f32); - ifn!("llvm.pow.f64", fn(t_f64, t_f64) -> t_f64); - ifn!("llvm.pow.v2f64", fn(t_v2f64, t_v2f64) -> t_v2f64); - ifn!("llvm.pow.v4f64", fn(t_v4f64, t_v4f64) -> t_v4f64); - ifn!("llvm.pow.v8f64", fn(t_v8f64, t_v8f64) -> t_v8f64); - - ifn!("llvm.sqrt.f32", fn(t_f32) -> t_f32); - ifn!("llvm.sqrt.v2f32", fn(t_v2f32) -> t_v2f32); - ifn!("llvm.sqrt.v4f32", fn(t_v4f32) -> t_v4f32); - ifn!("llvm.sqrt.v8f32", fn(t_v8f32) -> t_v8f32); - ifn!("llvm.sqrt.v16f32", fn(t_v16f32) -> t_v16f32); - ifn!("llvm.sqrt.f64", fn(t_f64) -> t_f64); - ifn!("llvm.sqrt.v2f64", fn(t_v2f64) -> t_v2f64); - ifn!("llvm.sqrt.v4f64", fn(t_v4f64) -> t_v4f64); - ifn!("llvm.sqrt.v8f64", fn(t_v8f64) -> t_v8f64); - - ifn!("llvm.sin.f32", fn(t_f32) -> t_f32); - ifn!("llvm.sin.v2f32", fn(t_v2f32) -> t_v2f32); - ifn!("llvm.sin.v4f32", fn(t_v4f32) -> t_v4f32); - ifn!("llvm.sin.v8f32", fn(t_v8f32) -> t_v8f32); - ifn!("llvm.sin.v16f32", fn(t_v16f32) -> t_v16f32); - ifn!("llvm.sin.f64", fn(t_f64) -> t_f64); - ifn!("llvm.sin.v2f64", fn(t_v2f64) -> t_v2f64); - ifn!("llvm.sin.v4f64", fn(t_v4f64) -> t_v4f64); - ifn!("llvm.sin.v8f64", fn(t_v8f64) -> t_v8f64); - - ifn!("llvm.cos.f32", fn(t_f32) -> t_f32); - ifn!("llvm.cos.v2f32", fn(t_v2f32) -> t_v2f32); - ifn!("llvm.cos.v4f32", fn(t_v4f32) -> t_v4f32); - 
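// `generate_local_symbol_name` above appends "<prefix>." plus a base-62
// (`ALPHANUMERIC_ONLY`) rendering of a per-context counter; '.' cannot occur
// in user-written identifiers, so generated names never collide with them.
// A simplified, self-contained sketch (decimal counter instead of base-62):
use std::cell::Cell;

struct SymbolNamer {
    local_gen_sym_counter: Cell<u64>,
}

impl SymbolNamer {
    fn generate_local_symbol_name(&self, prefix: &str) -> String {
        let idx = self.local_gen_sym_counter.get();
        self.local_gen_sym_counter.set(idx + 1);
        // The '.' separator keeps "str.0", "str.1", ... out of the user's
        // identifier namespace.
        format!("{}.{}", prefix, idx)
    }
}

fn main() {
    let namer = SymbolNamer { local_gen_sym_counter: Cell::new(0) };
    assert_eq!(namer.generate_local_symbol_name("str"), "str.0");
    assert_eq!(namer.generate_local_symbol_name("str"), "str.1");
}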
ifn!("llvm.cos.v8f32", fn(t_v8f32) -> t_v8f32); - ifn!("llvm.cos.v16f32", fn(t_v16f32) -> t_v16f32); - ifn!("llvm.cos.f64", fn(t_f64) -> t_f64); - ifn!("llvm.cos.v2f64", fn(t_v2f64) -> t_v2f64); - ifn!("llvm.cos.v4f64", fn(t_v4f64) -> t_v4f64); - ifn!("llvm.cos.v8f64", fn(t_v8f64) -> t_v8f64); - - ifn!("llvm.exp.f32", fn(t_f32) -> t_f32); - ifn!("llvm.exp.v2f32", fn(t_v2f32) -> t_v2f32); - ifn!("llvm.exp.v4f32", fn(t_v4f32) -> t_v4f32); - ifn!("llvm.exp.v8f32", fn(t_v8f32) -> t_v8f32); - ifn!("llvm.exp.v16f32", fn(t_v16f32) -> t_v16f32); - ifn!("llvm.exp.f64", fn(t_f64) -> t_f64); - ifn!("llvm.exp.v2f64", fn(t_v2f64) -> t_v2f64); - ifn!("llvm.exp.v4f64", fn(t_v4f64) -> t_v4f64); - ifn!("llvm.exp.v8f64", fn(t_v8f64) -> t_v8f64); - - ifn!("llvm.exp2.f32", fn(t_f32) -> t_f32); - ifn!("llvm.exp2.v2f32", fn(t_v2f32) -> t_v2f32); - ifn!("llvm.exp2.v4f32", fn(t_v4f32) -> t_v4f32); - ifn!("llvm.exp2.v8f32", fn(t_v8f32) -> t_v8f32); - ifn!("llvm.exp2.v16f32", fn(t_v16f32) -> t_v16f32); - ifn!("llvm.exp2.f64", fn(t_f64) -> t_f64); - ifn!("llvm.exp2.v2f64", fn(t_v2f64) -> t_v2f64); - ifn!("llvm.exp2.v4f64", fn(t_v4f64) -> t_v4f64); - ifn!("llvm.exp2.v8f64", fn(t_v8f64) -> t_v8f64); - - ifn!("llvm.log.f32", fn(t_f32) -> t_f32); - ifn!("llvm.log.v2f32", fn(t_v2f32) -> t_v2f32); - ifn!("llvm.log.v4f32", fn(t_v4f32) -> t_v4f32); - ifn!("llvm.log.v8f32", fn(t_v8f32) -> t_v8f32); - ifn!("llvm.log.v16f32", fn(t_v16f32) -> t_v16f32); - ifn!("llvm.log.f64", fn(t_f64) -> t_f64); - ifn!("llvm.log.v2f64", fn(t_v2f64) -> t_v2f64); - ifn!("llvm.log.v4f64", fn(t_v4f64) -> t_v4f64); - ifn!("llvm.log.v8f64", fn(t_v8f64) -> t_v8f64); - - ifn!("llvm.log10.f32", fn(t_f32) -> t_f32); - ifn!("llvm.log10.v2f32", fn(t_v2f32) -> t_v2f32); - ifn!("llvm.log10.v4f32", fn(t_v4f32) -> t_v4f32); - ifn!("llvm.log10.v8f32", fn(t_v8f32) -> t_v8f32); - ifn!("llvm.log10.v16f32", fn(t_v16f32) -> t_v16f32); - ifn!("llvm.log10.f64", fn(t_f64) -> t_f64); - ifn!("llvm.log10.v2f64", fn(t_v2f64) -> t_v2f64); - ifn!("llvm.log10.v4f64", fn(t_v4f64) -> t_v4f64); - ifn!("llvm.log10.v8f64", fn(t_v8f64) -> t_v8f64); - - ifn!("llvm.log2.f32", fn(t_f32) -> t_f32); - ifn!("llvm.log2.v2f32", fn(t_v2f32) -> t_v2f32); - ifn!("llvm.log2.v4f32", fn(t_v4f32) -> t_v4f32); - ifn!("llvm.log2.v8f32", fn(t_v8f32) -> t_v8f32); - ifn!("llvm.log2.v16f32", fn(t_v16f32) -> t_v16f32); - ifn!("llvm.log2.f64", fn(t_f64) -> t_f64); - ifn!("llvm.log2.v2f64", fn(t_v2f64) -> t_v2f64); - ifn!("llvm.log2.v4f64", fn(t_v4f64) -> t_v4f64); - ifn!("llvm.log2.v8f64", fn(t_v8f64) -> t_v8f64); - - ifn!("llvm.fma.f32", fn(t_f32, t_f32, t_f32) -> t_f32); - ifn!("llvm.fma.v2f32", fn(t_v2f32, t_v2f32, t_v2f32) -> t_v2f32); - ifn!("llvm.fma.v4f32", fn(t_v4f32, t_v4f32, t_v4f32) -> t_v4f32); - ifn!("llvm.fma.v8f32", fn(t_v8f32, t_v8f32, t_v8f32) -> t_v8f32); - ifn!("llvm.fma.v16f32", fn(t_v16f32, t_v16f32, t_v16f32) -> t_v16f32); - ifn!("llvm.fma.f64", fn(t_f64, t_f64, t_f64) -> t_f64); - ifn!("llvm.fma.v2f64", fn(t_v2f64, t_v2f64, t_v2f64) -> t_v2f64); - ifn!("llvm.fma.v4f64", fn(t_v4f64, t_v4f64, t_v4f64) -> t_v4f64); - ifn!("llvm.fma.v8f64", fn(t_v8f64, t_v8f64, t_v8f64) -> t_v8f64); - - ifn!("llvm.fabs.f32", fn(t_f32) -> t_f32); - ifn!("llvm.fabs.v2f32", fn(t_v2f32) -> t_v2f32); - ifn!("llvm.fabs.v4f32", fn(t_v4f32) -> t_v4f32); - ifn!("llvm.fabs.v8f32", fn(t_v8f32) -> t_v8f32); - ifn!("llvm.fabs.v16f32", fn(t_v16f32) -> t_v16f32); - ifn!("llvm.fabs.f64", fn(t_f64) -> t_f64); - ifn!("llvm.fabs.v2f64", fn(t_v2f64) -> t_v2f64); - ifn!("llvm.fabs.v4f64", fn(t_v4f64) -> t_v4f64); - 
ifn!("llvm.fabs.v8f64", fn(t_v8f64) -> t_v8f64); - - ifn!("llvm.floor.f32", fn(t_f32) -> t_f32); - ifn!("llvm.floor.v2f32", fn(t_v2f32) -> t_v2f32); - ifn!("llvm.floor.v4f32", fn(t_v4f32) -> t_v4f32); - ifn!("llvm.floor.v8f32", fn(t_v8f32) -> t_v8f32); - ifn!("llvm.floor.v16f32", fn(t_v16f32) -> t_v16f32); - ifn!("llvm.floor.f64", fn(t_f64) -> t_f64); - ifn!("llvm.floor.v2f64", fn(t_v2f64) -> t_v2f64); - ifn!("llvm.floor.v4f64", fn(t_v4f64) -> t_v4f64); - ifn!("llvm.floor.v8f64", fn(t_v8f64) -> t_v8f64); - - ifn!("llvm.ceil.f32", fn(t_f32) -> t_f32); - ifn!("llvm.ceil.v2f32", fn(t_v2f32) -> t_v2f32); - ifn!("llvm.ceil.v4f32", fn(t_v4f32) -> t_v4f32); - ifn!("llvm.ceil.v8f32", fn(t_v8f32) -> t_v8f32); - ifn!("llvm.ceil.v16f32", fn(t_v16f32) -> t_v16f32); - ifn!("llvm.ceil.f64", fn(t_f64) -> t_f64); - ifn!("llvm.ceil.v2f64", fn(t_v2f64) -> t_v2f64); - ifn!("llvm.ceil.v4f64", fn(t_v4f64) -> t_v4f64); - ifn!("llvm.ceil.v8f64", fn(t_v8f64) -> t_v8f64); - - ifn!("llvm.trunc.f32", fn(t_f32) -> t_f32); - ifn!("llvm.trunc.f64", fn(t_f64) -> t_f64); - - ifn!("llvm.copysign.f32", fn(t_f32, t_f32) -> t_f32); - ifn!("llvm.copysign.f64", fn(t_f64, t_f64) -> t_f64); - ifn!("llvm.round.f32", fn(t_f32) -> t_f32); - ifn!("llvm.round.f64", fn(t_f64) -> t_f64); - - ifn!("llvm.rint.f32", fn(t_f32) -> t_f32); - ifn!("llvm.rint.f64", fn(t_f64) -> t_f64); - ifn!("llvm.nearbyint.f32", fn(t_f32) -> t_f32); - ifn!("llvm.nearbyint.f64", fn(t_f64) -> t_f64); - - ifn!("llvm.ctpop.i8", fn(t_i8) -> t_i8); - ifn!("llvm.ctpop.i16", fn(t_i16) -> t_i16); - ifn!("llvm.ctpop.i32", fn(t_i32) -> t_i32); - ifn!("llvm.ctpop.i64", fn(t_i64) -> t_i64); - ifn!("llvm.ctpop.i128", fn(t_i128) -> t_i128); - - ifn!("llvm.ctlz.i8", fn(t_i8 , i1) -> t_i8); - ifn!("llvm.ctlz.i16", fn(t_i16, i1) -> t_i16); - ifn!("llvm.ctlz.i32", fn(t_i32, i1) -> t_i32); - ifn!("llvm.ctlz.i64", fn(t_i64, i1) -> t_i64); - ifn!("llvm.ctlz.i128", fn(t_i128, i1) -> t_i128); - - ifn!("llvm.cttz.i8", fn(t_i8 , i1) -> t_i8); - ifn!("llvm.cttz.i16", fn(t_i16, i1) -> t_i16); - ifn!("llvm.cttz.i32", fn(t_i32, i1) -> t_i32); - ifn!("llvm.cttz.i64", fn(t_i64, i1) -> t_i64); - ifn!("llvm.cttz.i128", fn(t_i128, i1) -> t_i128); - - ifn!("llvm.bswap.i16", fn(t_i16) -> t_i16); - ifn!("llvm.bswap.i32", fn(t_i32) -> t_i32); - ifn!("llvm.bswap.i64", fn(t_i64) -> t_i64); - ifn!("llvm.bswap.i128", fn(t_i128) -> t_i128); - - ifn!("llvm.bitreverse.i8", fn(t_i8) -> t_i8); - ifn!("llvm.bitreverse.i16", fn(t_i16) -> t_i16); - ifn!("llvm.bitreverse.i32", fn(t_i32) -> t_i32); - ifn!("llvm.bitreverse.i64", fn(t_i64) -> t_i64); - ifn!("llvm.bitreverse.i128", fn(t_i128) -> t_i128); - - ifn!("llvm.sadd.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); - ifn!("llvm.sadd.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); - ifn!("llvm.sadd.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); - ifn!("llvm.sadd.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); - ifn!("llvm.sadd.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1}); - - ifn!("llvm.uadd.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); - ifn!("llvm.uadd.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); - ifn!("llvm.uadd.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); - ifn!("llvm.uadd.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); - ifn!("llvm.uadd.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1}); - - ifn!("llvm.ssub.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); - 
ifn!("llvm.ssub.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); - ifn!("llvm.ssub.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); - ifn!("llvm.ssub.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); - ifn!("llvm.ssub.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1}); - - ifn!("llvm.usub.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); - ifn!("llvm.usub.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); - ifn!("llvm.usub.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); - ifn!("llvm.usub.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); - ifn!("llvm.usub.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1}); - - ifn!("llvm.smul.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); - ifn!("llvm.smul.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); - ifn!("llvm.smul.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); - ifn!("llvm.smul.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); - ifn!("llvm.smul.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1}); - - ifn!("llvm.umul.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); - ifn!("llvm.umul.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); - ifn!("llvm.umul.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); - ifn!("llvm.umul.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); - ifn!("llvm.umul.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1}); - - ifn!("llvm.lifetime.start", fn(t_i64,i8p) -> void); - ifn!("llvm.lifetime.end", fn(t_i64, i8p) -> void); - - ifn!("llvm.expect.i1", fn(i1, i1) -> i1); - ifn!("llvm.eh.typeid.for", fn(i8p) -> t_i32); - ifn!("llvm.localescape", fn(...) -> void); - ifn!("llvm.localrecover", fn(i8p, i8p, t_i32) -> i8p); - ifn!("llvm.x86.seh.recoverfp", fn(i8p, i8p) -> i8p); - - ifn!("llvm.assume", fn(i1) -> void); - ifn!("llvm.prefetch", fn(i8p, t_i32, t_i32, t_i32) -> void); - - if cx.sess().opts.debuginfo != DebugInfo::None { - ifn!("llvm.dbg.declare", fn(Type::metadata(cx), Type::metadata(cx)) -> void); - ifn!("llvm.dbg.value", fn(Type::metadata(cx), t_i64, Type::metadata(cx)) -> void); - } - - None -} diff --git a/src/librustc_codegen_llvm/debuginfo/create_scope_map.rs b/src/librustc_codegen_llvm/debuginfo/create_scope_map.rs index 56352ae963..c18e126e52 100644 --- a/src/librustc_codegen_llvm/debuginfo/create_scope_map.rs +++ b/src/librustc_codegen_llvm/debuginfo/create_scope_map.rs @@ -8,12 +8,12 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use super::{FunctionDebugContext, FunctionDebugContextData}; +use rustc_codegen_ssa::debuginfo::{FunctionDebugContext, FunctionDebugContextData, MirDebugScope}; use super::metadata::file_metadata; use super::utils::{DIB, span_start}; use llvm; -use llvm::debuginfo::DIScope; +use llvm::debuginfo::{DIScope, DISubprogram}; use common::CodegenCx; use rustc::mir::{Mir, SourceScope}; @@ -26,28 +26,13 @@ use rustc_data_structures::indexed_vec::{Idx, IndexVec}; use syntax_pos::BytePos; -#[derive(Clone, Copy, Debug)] -pub struct MirDebugScope<'ll> { - pub scope_metadata: Option<&'ll DIScope>, - // Start and end offsets of the file to which this DIScope belongs. - // These are used to quickly determine whether some span refers to the same file. 
- pub file_start_pos: BytePos, - pub file_end_pos: BytePos, -} - -impl MirDebugScope<'ll> { - pub fn is_valid(&self) -> bool { - self.scope_metadata.is_some() - } -} - /// Produce DIScope DIEs for each MIR Scope which has variables defined in it. /// If debuginfo is disabled, the returned vector is empty. pub fn create_mir_scopes( cx: &CodegenCx<'ll, '_>, mir: &Mir, - debug_context: &FunctionDebugContext<'ll>, -) -> IndexVec> { + debug_context: &FunctionDebugContext<&'ll DISubprogram>, +) -> IndexVec> { let null_scope = MirDebugScope { scope_metadata: None, file_start_pos: BytePos(0), @@ -82,9 +67,9 @@ pub fn create_mir_scopes( fn make_mir_scope(cx: &CodegenCx<'ll, '_>, mir: &Mir, has_variables: &BitSet, - debug_context: &FunctionDebugContextData<'ll>, + debug_context: &FunctionDebugContextData<&'ll DISubprogram>, scope: SourceScope, - scopes: &mut IndexVec>) { + scopes: &mut IndexVec>) { if scopes[scope].is_valid() { return; } diff --git a/src/librustc_codegen_llvm/debuginfo/gdb.rs b/src/librustc_codegen_llvm/debuginfo/gdb.rs index f6faddb894..4be93d826b 100644 --- a/src/librustc_codegen_llvm/debuginfo/gdb.rs +++ b/src/librustc_codegen_llvm/debuginfo/gdb.rs @@ -12,24 +12,23 @@ use llvm; -use common::{C_bytes, CodegenCx, C_i32}; +use common::CodegenCx; use builder::Builder; -use declare; use rustc::session::config::DebugInfo; -use type_::Type; use value::Value; +use rustc_codegen_ssa::traits::*; use syntax::attr; /// Inserts a side-effect free instruction sequence that makes sure that the /// .debug_gdb_scripts global is referenced, so it isn't removed by the linker. -pub fn insert_reference_to_gdb_debug_scripts_section_global(bx: &Builder) { - if needs_gdb_debug_scripts_section(bx.cx) { - let gdb_debug_scripts_section = get_or_insert_gdb_debug_scripts_section_global(bx.cx); +pub fn insert_reference_to_gdb_debug_scripts_section_global(bx: &mut Builder) { + if needs_gdb_debug_scripts_section(bx) { + let gdb_debug_scripts_section = get_or_insert_gdb_debug_scripts_section_global(bx); // Load just the first byte as that's all that's necessary to force // LLVM to keep around the reference to the global. 
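// The gdb.rs hunk above keeps the `.debug_gdb_scripts` global alive by
// emitting a volatile load of its first byte: volatile accesses are never
// optimized away, so the linker still sees a reference to the section. The
// same trick written directly in Rust, against a hypothetical stand-in for
// the pretty-printer payload:
static GDB_SCRIPTS_SECTION: [u8; 4] = *b"\x01py\0";

fn keep_section_alive() {
    // Loading a single byte is enough to force the reference to be kept.
    let _first_byte = unsafe { std::ptr::read_volatile(GDB_SCRIPTS_SECTION.as_ptr()) };
}

fn main() {
    keep_section_alive();
}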
- let indices = [C_i32(bx.cx, 0), C_i32(bx.cx, 0)]; + let indices = [bx.const_i32(0), bx.const_i32(0)]; let element = bx.inbounds_gep(gdb_debug_scripts_section, &indices); let volative_load_instruction = bx.volatile_load(element); unsafe { @@ -55,15 +54,15 @@ pub fn get_or_insert_gdb_debug_scripts_section_global(cx: &CodegenCx<'ll, '_>) let section_contents = b"\x01gdb_load_rust_pretty_printers.py\0"; unsafe { - let llvm_type = Type::array(Type::i8(cx), + let llvm_type = cx.type_array(cx.type_i8(), section_contents.len() as u64); - let section_var = declare::define_global(cx, section_var_name, + let section_var = cx.define_global(section_var_name, llvm_type).unwrap_or_else(||{ bug!("symbol `{}` is already defined", section_var_name) }); llvm::LLVMSetSection(section_var, section_name.as_ptr() as *const _); - llvm::LLVMSetInitializer(section_var, C_bytes(cx, section_contents)); + llvm::LLVMSetInitializer(section_var, cx.const_bytes(section_contents)); llvm::LLVMSetGlobalConstant(section_var, llvm::True); llvm::LLVMSetUnnamedAddr(section_var, llvm::True); llvm::LLVMRustSetLinkage(section_var, llvm::Linkage::LinkOnceODRLinkage); diff --git a/src/librustc_codegen_llvm/debuginfo/metadata.rs b/src/librustc_codegen_llvm/debuginfo/metadata.rs index 846d505641..5a1c62e253 100644 --- a/src/librustc_codegen_llvm/debuginfo/metadata.rs +++ b/src/librustc_codegen_llvm/debuginfo/metadata.rs @@ -17,12 +17,14 @@ use super::utils::{debug_context, DIB, span_start, use super::namespace::mangled_name_of_instance; use super::type_names::compute_debuginfo_type_name; use super::{CrateDebugContext}; +use rustc_codegen_ssa::traits::*; use abi; use value::Value; use llvm; -use llvm::debuginfo::{DIType, DIFile, DIScope, DIDescriptor, +use llvm::debuginfo::{DIArray, DIType, DIFile, DIScope, DIDescriptor, DICompositeType, DILexicalBlock, DIFlags}; +use llvm_util; use rustc_data_structures::stable_hasher::{HashStable, StableHasher}; use rustc::hir::CodegenFnAttrFlags; @@ -33,11 +35,14 @@ use rustc_data_structures::fingerprint::Fingerprint; use rustc::ty::Instance; use common::CodegenCx; use rustc::ty::{self, AdtKind, ParamEnv, Ty, TyCtxt}; -use rustc::ty::layout::{self, Align, LayoutOf, PrimitiveExt, Size, TyLayout}; +use rustc::ty::layout::{self, Align, Integer, IntegerExt, LayoutOf, + PrimitiveExt, Size, TyLayout}; +use rustc::ty::subst::UnpackedKind; use rustc::session::config; use rustc::util::nodemap::FxHashMap; -use rustc_fs_util::path2cstr; +use rustc_fs_util::path_to_c_string; use rustc_data_structures::small_c_str::SmallCStr; +use rustc_target::abi::HasDataLayout; use libc::{c_uint, c_longlong}; use std::ffi::CString; @@ -205,6 +210,7 @@ enum RecursiveTypeDescription<'ll, 'tcx> { unfinished_type: Ty<'tcx>, unique_type_id: UniqueTypeId, metadata_stub: &'ll DICompositeType, + member_holding_stub: &'ll DICompositeType, member_description_factory: MemberDescriptionFactory<'ll, 'tcx>, }, FinalMetadata(&'ll DICompositeType) @@ -215,6 +221,7 @@ fn create_and_register_recursive_type_forward_declaration( unfinished_type: Ty<'tcx>, unique_type_id: UniqueTypeId, metadata_stub: &'ll DICompositeType, + member_holding_stub: &'ll DICompositeType, member_description_factory: MemberDescriptionFactory<'ll, 'tcx>, ) -> RecursiveTypeDescription<'ll, 'tcx> { @@ -227,6 +234,7 @@ fn create_and_register_recursive_type_forward_declaration( unfinished_type, unique_type_id, metadata_stub, + member_holding_stub, member_description_factory, } } @@ -242,6 +250,7 @@ impl RecursiveTypeDescription<'ll, 'tcx> { unfinished_type, unique_type_id, 
metadata_stub, + member_holding_stub, ref member_description_factory, } => { // Make sure that we have a forward declaration of the type in @@ -266,7 +275,8 @@ impl RecursiveTypeDescription<'ll, 'tcx> { // ... and attach them to the stub to complete it. set_members_of_composite_type(cx, - metadata_stub, + unfinished_type, + member_holding_stub, member_descriptions); return MetadataCreationResult::new(metadata_stub, true); } @@ -316,7 +326,7 @@ fn fixed_vec_metadata( llvm::LLVMRustDIBuilderCreateArrayType( DIB(cx), size.bits(), - align.abi_bits() as u32, + align.bits() as u32, element_type_metadata, subscripts) }; @@ -350,6 +360,7 @@ fn vec_slice_metadata( size: pointer_size, align: pointer_align, flags: DIFlags::FlagZero, + discriminant: None, }, MemberDescription { name: "length".to_owned(), @@ -358,6 +369,7 @@ fn vec_slice_metadata( size: usize_size, align: usize_align, flags: DIFlags::FlagZero, + discriminant: None, }, ]; @@ -456,16 +468,18 @@ fn trait_pointer_metadata( syntax_pos::DUMMY_SP), offset: layout.fields.offset(0), size: data_ptr_field.size, - align: data_ptr_field.align, + align: data_ptr_field.align.abi, flags: DIFlags::FlagArtificial, + discriminant: None, }, MemberDescription { name: "vtable".to_owned(), type_metadata: type_metadata(cx, vtable_field.ty, syntax_pos::DUMMY_SP), offset: layout.fields.offset(1), size: vtable_field.size, - align: vtable_field.align, + align: vtable_field.align.abi, flags: DIFlags::FlagArtificial, + discriminant: None, }, ]; @@ -776,7 +790,7 @@ fn basic_type_metadata(cx: &CodegenCx<'ll, 'tcx>, t: Ty<'tcx>) -> &'ll DIType { DIB(cx), name.as_ptr(), size.bits(), - align.abi_bits() as u32, + align.bits() as u32, encoding) }; @@ -807,7 +821,7 @@ fn pointer_type_metadata( DIB(cx), pointee_type_metadata, pointer_size.bits(), - pointer_align.abi_bits() as u32, + pointer_align.bits() as u32, name.as_ptr()) } } @@ -881,7 +895,7 @@ pub fn compile_unit_metadata(tcx: TyCtxt, }; fn path_to_mdstring(llcx: &'ll llvm::Context, path: &Path) -> &'ll Value { - let path_str = path2cstr(path); + let path_str = path_to_c_string(path); unsafe { llvm::LLVMMDStringInContext(llcx, path_str.as_ptr(), @@ -914,6 +928,7 @@ struct MemberDescription<'ll> { size: Size, align: Align, flags: DIFlags, + discriminant: Option, } // A factory for MemberDescriptions. 
It produces a list of member descriptions @@ -973,14 +988,14 @@ impl<'tcx> StructMemberDescriptionFactory<'tcx> { f.ident.to_string() }; let field = layout.field(cx, i); - let (size, align) = field.size_and_align(); MemberDescription { name, type_metadata: type_metadata(cx, field.ty, self.span), offset: layout.fields.offset(i), - size, - align, + size: field.size, + align: field.align.abi, flags: DIFlags::FlagZero, + discriminant: None, } }).collect() } @@ -1013,6 +1028,7 @@ fn prepare_struct_metadata( struct_type, unique_type_id, struct_metadata_stub, + struct_metadata_stub, StructMDF(StructMemberDescriptionFactory { ty: struct_type, variant, @@ -1045,6 +1061,7 @@ impl<'tcx> TupleMemberDescriptionFactory<'tcx> { size, align, flags: DIFlags::FlagZero, + discriminant: None, } }).collect() } @@ -1059,15 +1076,18 @@ fn prepare_tuple_metadata( ) -> RecursiveTypeDescription<'ll, 'tcx> { let tuple_name = compute_debuginfo_type_name(cx, tuple_type, false); + let struct_stub = create_struct_stub(cx, + tuple_type, + &tuple_name[..], + unique_type_id, + NO_SCOPE_METADATA); + create_and_register_recursive_type_forward_declaration( cx, tuple_type, unique_type_id, - create_struct_stub(cx, - tuple_type, - &tuple_name[..], - unique_type_id, - NO_SCOPE_METADATA), + struct_stub, + struct_stub, TupleMDF(TupleMemberDescriptionFactory { ty: tuple_type, component_types: component_types.to_vec(), @@ -1091,14 +1111,14 @@ impl<'tcx> UnionMemberDescriptionFactory<'tcx> { -> Vec> { self.variant.fields.iter().enumerate().map(|(i, f)| { let field = self.layout.field(cx, i); - let (size, align) = field.size_and_align(); MemberDescription { name: f.ident.to_string(), type_metadata: type_metadata(cx, field.ty, self.span), offset: Size::ZERO, - size, - align, + size: field.size, + align: field.align.abi, flags: DIFlags::FlagZero, + discriminant: None, } }).collect() } @@ -1130,6 +1150,7 @@ fn prepare_union_metadata( union_type, unique_type_id, union_metadata_stub, + union_metadata_stub, UnionMDF(UnionMemberDescriptionFactory { layout: cx.layout_of(union_type), variant, @@ -1142,6 +1163,19 @@ fn prepare_union_metadata( // Enums //=----------------------------------------------------------------------------- +// DWARF variant support is only available starting in LLVM 7. +// Although the earlier enum debug info output did not work properly +// in all situations, it is better for the time being to continue to +// sometimes emit the old style rather than emit something completely +// useless when rust is compiled against LLVM 6 or older. This +// function decides which representation will be emitted. +fn use_enum_fallback(cx: &CodegenCx) -> bool { + // On MSVC we have to use the fallback mode, because LLVM doesn't + // lower variant parts to PDB. + return cx.sess().target.target.options.is_like_msvc + || llvm_util::get_major_version() < 7; +} + // Describes the members of an enum value: An enum is described as a union of // structs in DWARF. This MemberDescriptionFactory provides the description for // the members of this union; so for every variant of the given enum, this @@ -1159,6 +1193,15 @@ impl EnumMemberDescriptionFactory<'ll, 'tcx> { fn create_member_descriptions(&self, cx: &CodegenCx<'ll, 'tcx>) -> Vec> { let adt = &self.enum_type.ty_adt_def().unwrap(); + + // This will always find the metadata in the type map. 
+ let fallback = use_enum_fallback(cx); + let self_metadata = if fallback { + self.containing_scope + } else { + type_metadata(cx, self.enum_type, self.span) + }; + match self.layout.variants { layout::Variants::Single { .. } if adt.variants.is_empty() => vec![], layout::Variants::Single { index } => { @@ -1167,115 +1210,181 @@ impl EnumMemberDescriptionFactory<'ll, 'tcx> { self.layout, &adt.variants[index], NoDiscriminant, - self.containing_scope, + self_metadata, self.span); let member_descriptions = member_description_factory.create_member_descriptions(cx); set_members_of_composite_type(cx, + self.enum_type, variant_type_metadata, member_descriptions); vec![ MemberDescription { - name: String::new(), + name: if fallback { + String::new() + } else { + adt.variants[index].name.as_str().to_string() + }, type_metadata: variant_type_metadata, offset: Size::ZERO, size: self.layout.size, - align: self.layout.align, - flags: DIFlags::FlagZero + align: self.layout.align.abi, + flags: DIFlags::FlagZero, + discriminant: None, } ] } layout::Variants::Tagged { ref variants, .. } => { - let discriminant_info = RegularDiscriminant(self.discriminant_type_metadata - .expect("")); - (0..variants.len()).map(|i| { + let discriminant_info = if fallback { + RegularDiscriminant(self.discriminant_type_metadata + .expect("")) + } else { + // This doesn't matter in this case. + NoDiscriminant + }; + variants.iter_enumerated().map(|(i, _)| { let variant = self.layout.for_variant(cx, i); let (variant_type_metadata, member_desc_factory) = describe_enum_variant(cx, variant, &adt.variants[i], discriminant_info, - self.containing_scope, + self_metadata, self.span); let member_descriptions = member_desc_factory .create_member_descriptions(cx); set_members_of_composite_type(cx, + self.enum_type, variant_type_metadata, member_descriptions); MemberDescription { - name: String::new(), + name: if fallback { + String::new() + } else { + adt.variants[i].name.as_str().to_string() + }, type_metadata: variant_type_metadata, offset: Size::ZERO, - size: variant.size, - align: variant.align, - flags: DIFlags::FlagZero + size: self.layout.size, + align: self.layout.align.abi, + flags: DIFlags::FlagZero, + discriminant: Some(self.layout.ty.ty_adt_def().unwrap() + .discriminant_for_variant(cx.tcx, i) + .val as u64), } }).collect() } - layout::Variants::NicheFilling { dataful_variant, ref niche_variants, .. 
} => { - let variant = self.layout.for_variant(cx, dataful_variant); - // Create a description of the non-null variant - let (variant_type_metadata, member_description_factory) = - describe_enum_variant(cx, - variant, - &adt.variants[dataful_variant], - OptimizedDiscriminant, - self.containing_scope, - self.span); + layout::Variants::NicheFilling { + ref niche_variants, + niche_start, + ref variants, + dataful_variant, + ref niche, + } => { + if fallback { + let variant = self.layout.for_variant(cx, dataful_variant); + // Create a description of the non-null variant + let (variant_type_metadata, member_description_factory) = + describe_enum_variant(cx, + variant, + &adt.variants[dataful_variant], + OptimizedDiscriminant, + self.containing_scope, + self.span); - let variant_member_descriptions = - member_description_factory.create_member_descriptions(cx); + let variant_member_descriptions = + member_description_factory.create_member_descriptions(cx); - set_members_of_composite_type(cx, - variant_type_metadata, - variant_member_descriptions); + set_members_of_composite_type(cx, + self.enum_type, + variant_type_metadata, + variant_member_descriptions); - // Encode the information about the null variant in the union - // member's name. - let mut name = String::from("RUST$ENCODED$ENUM$"); - // HACK(eddyb) the debuggers should just handle offset+size - // of discriminant instead of us having to recover its path. - // Right now it's not even going to work for `niche_start > 0`, - // and for multiple niche variants it only supports the first. - fn compute_field_path<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, - name: &mut String, - layout: TyLayout<'tcx>, - offset: Size, - size: Size) { - for i in 0..layout.fields.count() { - let field_offset = layout.fields.offset(i); - if field_offset > offset { - continue; - } - let inner_offset = offset - field_offset; - let field = layout.field(cx, i); - if inner_offset + size <= field.size { - write!(name, "{}$", i).unwrap(); - compute_field_path(cx, name, field, inner_offset, size); + // Encode the information about the null variant in the union + // member's name. + let mut name = String::from("RUST$ENCODED$ENUM$"); + // Right now it's not even going to work for `niche_start > 0`, + // and for multiple niche variants it only supports the first. + fn compute_field_path<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, + name: &mut String, + layout: TyLayout<'tcx>, + offset: Size, + size: Size) { + for i in 0..layout.fields.count() { + let field_offset = layout.fields.offset(i); + if field_offset > offset { + continue; + } + let inner_offset = offset - field_offset; + let field = layout.field(cx, i); + if inner_offset + size <= field.size { + write!(name, "{}$", i).unwrap(); + compute_field_path(cx, name, field, inner_offset, size); + } } } + compute_field_path(cx, &mut name, + self.layout, + self.layout.fields.offset(0), + self.layout.field(cx, 0).size); + name.push_str(&adt.variants[*niche_variants.start()].name.as_str()); + + // Create the (singleton) list of descriptions of union members. 
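// In the fallback branch above, the niche's location is communicated to the
// debugger through the member name "RUST$ENCODED$ENUM$<i0>$<i1>$...<variant>",
// where the indices are the chain of fields whose offsets contain the
// discriminant. A simplified, self-contained sketch of that recursion, using
// a toy `Layout` with byte offsets rather than rustc's `TyLayout`:
use std::fmt::Write;

struct Layout {
    size: u64,
    /// (offset of the field within this layout, layout of the field)
    fields: Vec<(u64, Layout)>,
}

fn compute_field_path(layout: &Layout, name: &mut String, offset: u64, size: u64) {
    for (i, (field_offset, field)) in layout.fields.iter().enumerate() {
        if *field_offset > offset {
            continue;
        }
        let inner_offset = offset - field_offset;
        if inner_offset + size <= field.size {
            write!(name, "{}$", i).unwrap();
            compute_field_path(field, name, inner_offset, size);
        }
    }
}

fn main() {
    // A pointer-sized niche at offset 0 of field 0 of field 0.
    let leaf = Layout { size: 8, fields: vec![] };
    let inner = Layout { size: 16, fields: vec![(0, leaf)] };
    let outer = Layout { size: 16, fields: vec![(0, inner)] };
    let mut name = String::from("RUST$ENCODED$ENUM$");
    compute_field_path(&outer, &mut name, 0, 8);
    assert_eq!(name, "RUST$ENCODED$ENUM$0$0$");
}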
+ vec![ + MemberDescription { + name, + type_metadata: variant_type_metadata, + offset: Size::ZERO, + size: variant.size, + align: variant.align.abi, + flags: DIFlags::FlagZero, + discriminant: None, + } + ] + } else { + variants.iter_enumerated().map(|(i, _)| { + let variant = self.layout.for_variant(cx, i); + let (variant_type_metadata, member_desc_factory) = + describe_enum_variant(cx, + variant, + &adt.variants[i], + OptimizedDiscriminant, + self_metadata, + self.span); + + let member_descriptions = member_desc_factory + .create_member_descriptions(cx); + + set_members_of_composite_type(cx, + self.enum_type, + variant_type_metadata, + member_descriptions); + + let niche_value = if i == dataful_variant { + None + } else { + let value = (i.as_u32() as u128) + .wrapping_sub(niche_variants.start().as_u32() as u128) + .wrapping_add(niche_start); + let value = value & ((1u128 << niche.value.size(cx).bits()) - 1); + Some(value as u64) + }; + + MemberDescription { + name: adt.variants[i].name.as_str().to_string(), + type_metadata: variant_type_metadata, + offset: Size::ZERO, + size: self.layout.size, + align: self.layout.align.abi, + flags: DIFlags::FlagZero, + discriminant: niche_value, + } + }).collect() } - compute_field_path(cx, &mut name, - self.layout, - self.layout.fields.offset(0), - self.layout.field(cx, 0).size); - name.push_str(&adt.variants[*niche_variants.start()].name.as_str()); - - // Create the (singleton) list of descriptions of union members. - vec![ - MemberDescription { - name, - type_metadata: variant_type_metadata, - offset: Size::ZERO, - size: variant.size, - align: variant.align, - flags: DIFlags::FlagZero - } - ] } } } @@ -1297,14 +1406,19 @@ impl VariantMemberDescriptionFactory<'ll, 'tcx> { let (size, align) = cx.size_and_align_of(ty); MemberDescription { name: name.to_string(), - type_metadata: match self.discriminant_type_metadata { - Some(metadata) if i == 0 => metadata, - _ => type_metadata(cx, ty, self.span) + type_metadata: if use_enum_fallback(cx) { + match self.discriminant_type_metadata { + Some(metadata) if i == 0 => metadata, + _ => type_metadata(cx, ty, self.span) + } + } else { + type_metadata(cx, ty, self.span) }, offset: self.offsets[i], size, align, - flags: DIFlags::FlagZero + flags: DIFlags::FlagZero, + discriminant: None, } }).collect() } @@ -1317,10 +1431,10 @@ enum EnumDiscriminantInfo<'ll> { NoDiscriminant } -// Returns a tuple of (1) type_metadata_stub of the variant, (2) the llvm_type -// of the variant, and (3) a MemberDescriptionFactory for producing the -// descriptions of the fields of the variant. This is a rudimentary version of a -// full RecursiveTypeDescription. +// Returns a tuple of (1) type_metadata_stub of the variant, (2) a +// MemberDescriptionFactory for producing the descriptions of the +// fields of the variant. This is a rudimentary version of a full +// RecursiveTypeDescription. fn describe_enum_variant( cx: &CodegenCx<'ll, 'tcx>, layout: layout::TyLayout<'tcx>, @@ -1343,29 +1457,46 @@ fn describe_enum_variant( unique_type_id, Some(containing_scope)); - // If this is not a univariant enum, there is also the discriminant field. 
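// In the non-fallback niche-filling case above, each non-dataful variant's
// DWARF discriminant is the raw niche value: the variant index is rebased
// against `niche_variants.start()`, offset by `niche_start`, and masked to
// the niche primitive's width. A standalone sketch of that arithmetic:
fn niche_discriminant(
    variant_index: u32,
    niche_variants_start: u32,
    niche_start: u128,
    niche_size_bits: u32,
) -> u64 {
    let value = (variant_index as u128)
        .wrapping_sub(niche_variants_start as u128)
        .wrapping_add(niche_start);
    // Keep only as many bits as the niche primitive actually has.
    let value = value & ((1u128 << niche_size_bits) - 1);
    value as u64
}

fn main() {
    // Hypothetical `Option<&T>`-style layout: the all-zero pointer value is
    // the niche (`niche_start` = 0) and `None` is the only niche variant, so
    // its encoded discriminant is 0.
    assert_eq!(niche_discriminant(0, 0, 0, 64), 0);
}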
- let (discr_offset, discr_arg) = match discriminant_info { - RegularDiscriminant(_) => { - // We have the layout of an enum variant, we need the layout of the outer enum - let enum_layout = cx.layout_of(layout.ty); - (Some(enum_layout.fields.offset(0)), - Some(("RUST$ENUM$DISR".to_owned(), enum_layout.field(cx, 0).ty))) - } - _ => (None, None), - }; - let offsets = discr_offset.into_iter().chain((0..layout.fields.count()).map(|i| { - layout.fields.offset(i) - })).collect(); - // Build an array of (field name, field type) pairs to be captured in the factory closure. - let args = discr_arg.into_iter().chain((0..layout.fields.count()).map(|i| { - let name = if variant.ctor_kind == CtorKind::Fn { - format!("__{}", i) - } else { - variant.fields[i].ident.to_string() + let (offsets, args) = if use_enum_fallback(cx) { + // If this is not a univariant enum, there is also the discriminant field. + let (discr_offset, discr_arg) = match discriminant_info { + RegularDiscriminant(_) => { + // We have the layout of an enum variant, we need the layout of the outer enum + let enum_layout = cx.layout_of(layout.ty); + (Some(enum_layout.fields.offset(0)), + Some(("RUST$ENUM$DISR".to_owned(), enum_layout.field(cx, 0).ty))) + } + _ => (None, None), }; - (name, layout.field(cx, i).ty) - })).collect(); + ( + discr_offset.into_iter().chain((0..layout.fields.count()).map(|i| { + layout.fields.offset(i) + })).collect(), + discr_arg.into_iter().chain((0..layout.fields.count()).map(|i| { + let name = if variant.ctor_kind == CtorKind::Fn { + format!("__{}", i) + } else { + variant.fields[i].ident.to_string() + }; + (name, layout.field(cx, i).ty) + })).collect() + ) + } else { + ( + (0..layout.fields.count()).map(|i| { + layout.fields.offset(i) + }).collect(), + (0..layout.fields.count()).map(|i| { + let name = if variant.ctor_kind == CtorKind::Fn { + format!("__{}", i) + } else { + variant.fields[i].ident.to_string() + }; + (name, layout.field(cx, i).ty) + }).collect() + ) + }; let member_description_factory = VariantMDF(VariantMemberDescriptionFactory { @@ -1401,22 +1532,22 @@ fn prepare_enum_metadata( // let file_metadata = unknown_file_metadata(cx); - let def = enum_type.ty_adt_def().unwrap(); - let enumerators_metadata: Vec<_> = def.discriminants(cx.tcx) - .zip(&def.variants) - .map(|(discr, v)| { - let name = SmallCStr::new(&v.name.as_str()); - unsafe { - Some(llvm::LLVMRustDIBuilderCreateEnumerator( - DIB(cx), - name.as_ptr(), - // FIXME: what if enumeration has i128 discriminant? - discr.val as u64)) - } - }) - .collect(); - let discriminant_type_metadata = |discr: layout::Primitive| { + let def = enum_type.ty_adt_def().unwrap(); + let enumerators_metadata: Vec<_> = def.discriminants(cx.tcx) + .zip(&def.variants) + .map(|((_, discr), v)| { + let name = SmallCStr::new(&v.name.as_str()); + unsafe { + Some(llvm::LLVMRustDIBuilderCreateEnumerator( + DIB(cx), + name.as_ptr(), + // FIXME: what if enumeration has i128 discriminant? 
+ discr.val as u64)) + } + }) + .collect(); + let disr_type_key = (enum_def_id, discr); let cached_discriminant_type_metadata = debug_context(cx).created_enum_disr_types .borrow() @@ -1439,9 +1570,9 @@ fn prepare_enum_metadata( file_metadata, UNKNOWN_LINE_NUMBER, discriminant_size.bits(), - discriminant_align.abi_bits() as u32, + discriminant_align.abi.bits() as u32, create_DIArray(DIB(cx), &enumerators_metadata), - discriminant_base_type_metadata) + discriminant_base_type_metadata, true) }; debug_context(cx).created_enum_disr_types @@ -1455,48 +1586,155 @@ fn prepare_enum_metadata( let layout = cx.layout_of(enum_type); - let discriminant_type_metadata = match layout.variants { - layout::Variants::Single { .. } | - layout::Variants::NicheFilling { .. } => None, - layout::Variants::Tagged { ref tag, .. } => { - Some(discriminant_type_metadata(tag.value)) - } - }; - - if let (&layout::Abi::Scalar(_), Some(discr)) = (&layout.abi, discriminant_type_metadata) { - return FinalMetadata(discr); + match (&layout.abi, &layout.variants) { + (&layout::Abi::Scalar(_), &layout::Variants::Tagged {ref tag, .. }) => + return FinalMetadata(discriminant_type_metadata(tag.value)), + _ => {} } - let (enum_type_size, enum_type_align) = layout.size_and_align(); - let enum_name = SmallCStr::new(&enum_name); let unique_type_id_str = SmallCStr::new( debug_context(cx).type_map.borrow().get_unique_type_id_as_string(unique_type_id) ); - let enum_metadata = unsafe { - llvm::LLVMRustDIBuilderCreateUnionType( - DIB(cx), - containing_scope, - enum_name.as_ptr(), - file_metadata, - UNKNOWN_LINE_NUMBER, - enum_type_size.bits(), - enum_type_align.abi_bits() as u32, - DIFlags::FlagZero, - None, - 0, // RuntimeLang - unique_type_id_str.as_ptr()) + + if use_enum_fallback(cx) { + let discriminant_type_metadata = match layout.variants { + layout::Variants::Single { .. } | + layout::Variants::NicheFilling { .. } => None, + layout::Variants::Tagged { ref tag, .. } => { + Some(discriminant_type_metadata(tag.value)) + } + }; + + let enum_metadata = unsafe { + llvm::LLVMRustDIBuilderCreateUnionType( + DIB(cx), + containing_scope, + enum_name.as_ptr(), + file_metadata, + UNKNOWN_LINE_NUMBER, + layout.size.bits(), + layout.align.abi.bits() as u32, + DIFlags::FlagZero, + None, + 0, // RuntimeLang + unique_type_id_str.as_ptr()) + }; + + return create_and_register_recursive_type_forward_declaration( + cx, + enum_type, + unique_type_id, + enum_metadata, + enum_metadata, + EnumMDF(EnumMemberDescriptionFactory { + enum_type, + layout, + discriminant_type_metadata, + containing_scope, + span, + }), + ); + } + + let discriminator_metadata = match &layout.variants { + // A single-variant enum has no discriminant. + &layout::Variants::Single { .. } => None, + + &layout::Variants::NicheFilling { ref niche, .. } => { + // Find the integer type of the correct size. 
+ let size = niche.value.size(cx); + let align = niche.value.align(cx); + + let discr_type = match niche.value { + layout::Int(t, _) => t, + layout::Float(layout::FloatTy::F32) => Integer::I32, + layout::Float(layout::FloatTy::F64) => Integer::I64, + layout::Pointer => cx.data_layout().ptr_sized_integer(), + }.to_ty(cx.tcx, false); + + let discr_metadata = basic_type_metadata(cx, discr_type); + unsafe { + Some(llvm::LLVMRustDIBuilderCreateMemberType( + DIB(cx), + containing_scope, + ptr::null_mut(), + file_metadata, + UNKNOWN_LINE_NUMBER, + size.bits(), + align.abi.bits() as u32, + layout.fields.offset(0).bits(), + DIFlags::FlagArtificial, + discr_metadata)) + } + }, + + &layout::Variants::Tagged { ref tag, .. } => { + let discr_type = tag.value.to_ty(cx.tcx); + let (size, align) = cx.size_and_align_of(discr_type); + + let discr_metadata = basic_type_metadata(cx, discr_type); + unsafe { + Some(llvm::LLVMRustDIBuilderCreateMemberType( + DIB(cx), + containing_scope, + ptr::null_mut(), + file_metadata, + UNKNOWN_LINE_NUMBER, + size.bits(), + align.bits() as u32, + layout.fields.offset(0).bits(), + DIFlags::FlagArtificial, + discr_metadata)) + } + }, + }; + + let empty_array = create_DIArray(DIB(cx), &[]); + let variant_part = unsafe { + llvm::LLVMRustDIBuilderCreateVariantPart( + DIB(cx), + containing_scope, + ptr::null_mut(), + file_metadata, + UNKNOWN_LINE_NUMBER, + layout.size.bits(), + layout.align.abi.bits() as u32, + DIFlags::FlagZero, + discriminator_metadata, + empty_array, + unique_type_id_str.as_ptr()) + }; + + // The variant part must be wrapped in a struct according to DWARF. + let type_array = create_DIArray(DIB(cx), &[Some(variant_part)]); + let struct_wrapper = unsafe { + llvm::LLVMRustDIBuilderCreateStructType( + DIB(cx), + Some(containing_scope), + enum_name.as_ptr(), + file_metadata, + UNKNOWN_LINE_NUMBER, + layout.size.bits(), + layout.align.abi.bits() as u32, + DIFlags::FlagZero, + None, + type_array, + 0, + None, + unique_type_id_str.as_ptr()) }; return create_and_register_recursive_type_forward_declaration( cx, enum_type, unique_type_id, - enum_metadata, + struct_wrapper, + variant_part, EnumMDF(EnumMemberDescriptionFactory { enum_type, layout, - discriminant_type_metadata, + discriminant_type_metadata: None, containing_scope, span, }), @@ -1534,13 +1772,15 @@ fn composite_type_metadata( containing_scope); // ... and immediately create and add the member descriptions. 
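For context on the two `layout::Variants` cases handled above: a `Tagged` enum stores an explicit discriminant field next to the payload, while a `NicheFilling` enum encodes the discriminant in otherwise-invalid values of an existing field, which is why this code synthesizes an artificial member from the niche's primitive type instead of pointing at a real field. A minimal, self-contained illustration of the difference (plain Rust; `Tagged` is an example type, not something from this module):

```rust
use std::mem::size_of;

#[allow(dead_code)]
enum Tagged {
    A(u64),
    B(u64),
}

fn main() {
    // Tagged layout: the discriminant is a real field, so the enum is strictly
    // larger than its payload.
    assert!(size_of::<Tagged>() > size_of::<u64>());

    // Niche-filling layout: `None` reuses the null pointer of `&u8`, so there is
    // no separate discriminant field and the debuginfo has to describe the niche
    // itself as the discriminant member.
    assert_eq!(size_of::<Option<&u8>>(), size_of::<&u8>());
}
```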
set_members_of_composite_type(cx, + composite_type, composite_type_metadata, member_descriptions); composite_type_metadata } -fn set_members_of_composite_type(cx: &CodegenCx<'ll, '_>, +fn set_members_of_composite_type(cx: &CodegenCx<'ll, 'tcx>, + composite_type: Ty<'tcx>, composite_type_metadata: &'ll DICompositeType, member_descriptions: Vec>) { // In some rare cases LLVM metadata uniquing would lead to an existing type @@ -1565,25 +1805,76 @@ fn set_members_of_composite_type(cx: &CodegenCx<'ll, '_>, .map(|member_description| { let member_name = CString::new(member_description.name).unwrap(); unsafe { - Some(llvm::LLVMRustDIBuilderCreateMemberType( + Some(llvm::LLVMRustDIBuilderCreateVariantMemberType( DIB(cx), composite_type_metadata, member_name.as_ptr(), unknown_file_metadata(cx), UNKNOWN_LINE_NUMBER, member_description.size.bits(), - member_description.align.abi_bits() as u32, + member_description.align.bits() as u32, member_description.offset.bits(), + match member_description.discriminant { + None => None, + Some(value) => Some(cx.const_u64(value)), + }, member_description.flags, member_description.type_metadata)) } }) .collect(); + let type_params = compute_type_parameters(cx, composite_type); unsafe { let type_array = create_DIArray(DIB(cx), &member_metadata[..]); - llvm::LLVMRustDICompositeTypeSetTypeArray( - DIB(cx), composite_type_metadata, type_array); + llvm::LLVMRustDICompositeTypeReplaceArrays( + DIB(cx), composite_type_metadata, Some(type_array), type_params); + } +} + +// Compute the type parameters for a type, if any, for the given +// metadata. +fn compute_type_parameters(cx: &CodegenCx<'ll, 'tcx>, ty: Ty<'tcx>) -> Option<&'ll DIArray> { + if let ty::Adt(def, substs) = ty.sty { + if !substs.types().next().is_none() { + let generics = cx.tcx.generics_of(def.did); + let names = get_parameter_names(cx, generics); + let template_params: Vec<_> = substs.iter().zip(names).filter_map(|(kind, name)| { + if let UnpackedKind::Type(ty) = kind.unpack() { + let actual_type = cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), ty); + let actual_type_metadata = + type_metadata(cx, actual_type, syntax_pos::DUMMY_SP); + let name = SmallCStr::new(&name.as_str()); + Some(unsafe { + + Some(llvm::LLVMRustDIBuilderCreateTemplateTypeParameter( + DIB(cx), + None, + name.as_ptr(), + actual_type_metadata, + unknown_file_metadata(cx), + 0, + 0, + )) + }) + } else { + None + } + }).collect(); + + return Some(create_DIArray(DIB(cx), &template_params[..])); + } + } + return Some(create_DIArray(DIB(cx), &[])); + + fn get_parameter_names(cx: &CodegenCx, + generics: &ty::Generics) + -> Vec { + let mut names = generics.parent.map_or(vec![], |def_id| { + get_parameter_names(cx, cx.tcx.generics_of(def_id)) + }); + names.extend(generics.params.iter().map(|param| param.name)); + names } } @@ -1616,7 +1907,7 @@ fn create_struct_stub( unknown_file_metadata(cx), UNKNOWN_LINE_NUMBER, struct_size.bits(), - struct_align.abi_bits() as u32, + struct_align.bits() as u32, DIFlags::FlagZero, None, empty_array, @@ -1654,7 +1945,7 @@ fn create_union_stub( unknown_file_metadata(cx), UNKNOWN_LINE_NUMBER, union_size.bits(), - union_align.abi_bits() as u32, + union_align.bits() as u32, DIFlags::FlagZero, Some(empty_array), 0, // RuntimeLang @@ -1723,27 +2014,11 @@ pub fn create_global_var_metadata( is_local_to_unit, global, None, - global_align.abi() as u32, + global_align.bytes() as u32, ); } } -// Creates an "extension" of an existing DIScope into another file. 
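The `compute_type_parameters` helper introduced above only attaches template-parameter nodes when the ADT actually has type arguments; every monomorphization is a distinct type whose concrete arguments end up recorded on its composite-type node. A small sketch of what those instantiations look like at the source level (`Wrapper` is purely illustrative):

```rust
use std::any::type_name;

#[allow(dead_code)]
struct Wrapper<T> {
    value: T,
}

fn main() {
    // `Wrapper<i32>` and `Wrapper<&str>` are separate types; each one's debug
    // metadata carries its own template parameter (`i32` resp. `&str`).
    println!("{}", type_name::<Wrapper<i32>>());
    println!("{}", type_name::<Wrapper<&str>>());
}
```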
-pub fn extend_scope_to_file( - cx: &CodegenCx<'ll, '_>, - scope_metadata: &'ll DIScope, - file: &syntax_pos::SourceFile, - defining_crate: CrateNum, -) -> &'ll DILexicalBlock { - let file_metadata = file_metadata(cx, &file.name, defining_crate); - unsafe { - llvm::LLVMRustDIBuilderCreateLexicalBlockFile( - DIB(cx), - scope_metadata, - file_metadata) - } -} - /// Creates debug information for the given vtable, which is for the /// given type. /// @@ -1777,7 +2052,7 @@ pub fn create_vtable_metadata( unknown_file_metadata(cx), UNKNOWN_LINE_NUMBER, Size::ZERO.bits(), - cx.tcx.data_layout.pointer_align.abi_bits() as u32, + cx.tcx.data_layout.pointer_align.abi.bits() as u32, DIFlags::FlagArtificial, None, empty_array, @@ -1799,3 +2074,19 @@ pub fn create_vtable_metadata( 0); } } + +// Creates an "extension" of an existing DIScope into another file. +pub fn extend_scope_to_file( + cx: &CodegenCx<'ll, '_>, + scope_metadata: &'ll DIScope, + file: &syntax_pos::SourceFile, + defining_crate: CrateNum, +) -> &'ll DILexicalBlock { + let file_metadata = file_metadata(cx, &file.name, defining_crate); + unsafe { + llvm::LLVMRustDIBuilderCreateLexicalBlockFile( + DIB(cx), + scope_metadata, + file_metadata) + } +} diff --git a/src/librustc_codegen_llvm/debuginfo/mod.rs b/src/librustc_codegen_llvm/debuginfo/mod.rs index 042e72e921..78bdf678f6 100644 --- a/src/librustc_codegen_llvm/debuginfo/mod.rs +++ b/src/librustc_codegen_llvm/debuginfo/mod.rs @@ -11,8 +11,8 @@ // See doc.rs for documentation. mod doc; -use self::VariableAccess::*; -use self::VariableKind::*; +use rustc_codegen_ssa::debuginfo::VariableAccess::*; +use rustc_codegen_ssa::debuginfo::VariableKind::*; use self::utils::{DIB, span_start, create_DIArray, is_node_local_to_unit}; use self::namespace::mangled_name_of_instance; @@ -21,7 +21,8 @@ use self::metadata::{type_metadata, file_metadata, TypeMap}; use self::source_loc::InternalDebugLocation::{self, UnknownLocation}; use llvm; -use llvm::debuginfo::{DIFile, DIType, DIScope, DIBuilder, DISubprogram, DIArray, DIFlags}; +use llvm::debuginfo::{DIFile, DIType, DIScope, DIBuilder, DISubprogram, DIArray, DIFlags, + DILexicalBlock}; use rustc::hir::CodegenFnAttrFlags; use rustc::hir::def_id::{DefId, CrateNum}; use rustc::ty::subst::{Substs, UnpackedKind}; @@ -35,7 +36,10 @@ use rustc::mir; use rustc::session::config::{self, DebugInfo}; use rustc::util::nodemap::{DefIdMap, FxHashMap, FxHashSet}; use rustc_data_structures::small_c_str::SmallCStr; +use rustc_data_structures::indexed_vec::IndexVec; use value::Value; +use rustc_codegen_ssa::debuginfo::{FunctionDebugContext, MirDebugScope, VariableAccess, + VariableKind, FunctionDebugContextData}; use libc::c_uint; use std::cell::{Cell, RefCell}; @@ -44,7 +48,8 @@ use std::ffi::CString; use syntax_pos::{self, Span, Pos}; use syntax::ast; use syntax::symbol::{Symbol, InternedString}; -use rustc::ty::layout::{self, LayoutOf}; +use rustc::ty::layout::{self, LayoutOf, HasTyCtxt}; +use rustc_codegen_ssa::traits::*; pub mod gdb; mod utils; @@ -54,10 +59,8 @@ pub mod metadata; mod create_scope_map; mod source_loc; -pub use self::create_scope_map::{create_mir_scopes, MirDebugScope}; -pub use self::source_loc::start_emitting_source_locations; +pub use self::create_scope_map::{create_mir_scopes}; pub use self::metadata::create_global_var_metadata; -pub use self::metadata::create_vtable_metadata; pub use self::metadata::extend_scope_to_file; pub use self::source_loc::set_source_location; @@ -103,60 +106,12 @@ impl<'a, 'tcx> CrateDebugContext<'a, 'tcx> { created_files: 
Default::default(), created_enum_disr_types: Default::default(), type_map: Default::default(), - namespace_map: RefCell::new(DefIdMap()), + namespace_map: RefCell::new(Default::default()), composite_types_completed: Default::default(), } } } -pub enum FunctionDebugContext<'ll> { - RegularContext(FunctionDebugContextData<'ll>), - DebugInfoDisabled, - FunctionWithoutDebugInfo, -} - -impl FunctionDebugContext<'ll> { - pub fn get_ref<'a>(&'a self, span: Span) -> &'a FunctionDebugContextData<'ll> { - match *self { - FunctionDebugContext::RegularContext(ref data) => data, - FunctionDebugContext::DebugInfoDisabled => { - span_bug!(span, "{}", FunctionDebugContext::debuginfo_disabled_message()); - } - FunctionDebugContext::FunctionWithoutDebugInfo => { - span_bug!(span, "{}", FunctionDebugContext::should_be_ignored_message()); - } - } - } - - fn debuginfo_disabled_message() -> &'static str { - "debuginfo: Error trying to access FunctionDebugContext although debug info is disabled!" - } - - fn should_be_ignored_message() -> &'static str { - "debuginfo: Error trying to access FunctionDebugContext for function that should be \ - ignored by debug info!" - } -} - -pub struct FunctionDebugContextData<'ll> { - fn_metadata: &'ll DISubprogram, - source_locations_enabled: Cell, - pub defining_crate: CrateNum, -} - -pub enum VariableAccess<'a, 'll> { - // The llptr given is an alloca containing the variable's value - DirectVariable { alloca: &'ll Value }, - // The llptr given is an alloca containing the start of some pointer chain - // leading to the variable's content. - IndirectVariable { alloca: &'ll Value, address_operations: &'a [i64] } -} - -pub enum VariableKind { - ArgumentVariable(usize /*index*/), - LocalVariable, -} - /// Create any deferred debug metadata nodes pub fn finalize(cx: &CodegenCx) { if cx.dbg_cx.is_none() { @@ -202,348 +157,398 @@ pub fn finalize(cx: &CodegenCx) { }; } -/// Creates the function-specific debug context. -/// -/// Returns the FunctionDebugContext for the function which holds state needed -/// for debug info creation. The function may also return another variant of the -/// FunctionDebugContext enum which indicates why no debuginfo should be created -/// for the function. 
-pub fn create_function_debug_context( - cx: &CodegenCx<'ll, 'tcx>, - instance: Instance<'tcx>, - sig: ty::FnSig<'tcx>, - llfn: &'ll Value, - mir: &mir::Mir, -) -> FunctionDebugContext<'ll> { - if cx.sess().opts.debuginfo == DebugInfo::None { - return FunctionDebugContext::DebugInfoDisabled; +impl DebugInfoBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> { + fn declare_local( + &mut self, + dbg_context: &FunctionDebugContext<&'ll DISubprogram>, + variable_name: ast::Name, + variable_type: Ty<'tcx>, + scope_metadata: &'ll DIScope, + variable_access: VariableAccess<'_, &'ll Value>, + variable_kind: VariableKind, + span: Span, + ) { + assert!(!dbg_context.get_ref(span).source_locations_enabled.get()); + let cx = self.cx(); + + let file = span_start(cx, span).file; + let file_metadata = file_metadata(cx, + &file.name, + dbg_context.get_ref(span).defining_crate); + + let loc = span_start(cx, span); + let type_metadata = type_metadata(cx, variable_type, span); + + let (argument_index, dwarf_tag) = match variable_kind { + ArgumentVariable(index) => (index as c_uint, DW_TAG_arg_variable), + LocalVariable => (0, DW_TAG_auto_variable) + }; + let align = cx.align_of(variable_type); + + let name = SmallCStr::new(&variable_name.as_str()); + match (variable_access, &[][..]) { + (DirectVariable { alloca }, address_operations) | + (IndirectVariable {alloca, address_operations}, _) => { + let metadata = unsafe { + llvm::LLVMRustDIBuilderCreateVariable( + DIB(cx), + dwarf_tag, + scope_metadata, + name.as_ptr(), + file_metadata, + loc.line as c_uint, + type_metadata, + cx.sess().opts.optimize != config::OptLevel::No, + DIFlags::FlagZero, + argument_index, + align.bytes() as u32, + ) + }; + source_loc::set_debug_location(self, + InternalDebugLocation::new(scope_metadata, loc.line, loc.col.to_usize())); + unsafe { + let debug_loc = llvm::LLVMGetCurrentDebugLocation(self.llbuilder); + let instr = llvm::LLVMRustDIBuilderInsertDeclareAtEnd( + DIB(cx), + alloca, + metadata, + address_operations.as_ptr(), + address_operations.len() as c_uint, + debug_loc, + self.llbb()); + + llvm::LLVMSetInstDebugLocation(self.llbuilder, instr); + } + source_loc::set_debug_location(self, UnknownLocation); + } + } } - if let InstanceDef::Item(def_id) = instance.def { - if cx.tcx.codegen_fn_attrs(def_id).flags.contains(CodegenFnAttrFlags::NO_DEBUG) { + fn set_source_location( + &mut self, + debug_context: &FunctionDebugContext<&'ll DISubprogram>, + scope: Option<&'ll DIScope>, + span: Span, + ) { + set_source_location(debug_context, &self, scope, span) + } + fn insert_reference_to_gdb_debug_scripts_section_global(&mut self) { + gdb::insert_reference_to_gdb_debug_scripts_section_global(self) + } +} + +impl DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> { + fn create_function_debug_context( + &self, + instance: Instance<'tcx>, + sig: ty::FnSig<'tcx>, + llfn: &'ll Value, + mir: &mir::Mir, + ) -> FunctionDebugContext<&'ll DISubprogram> { + if self.sess().opts.debuginfo == DebugInfo::None { + return FunctionDebugContext::DebugInfoDisabled; + } + + if let InstanceDef::Item(def_id) = instance.def { + if self.tcx().codegen_fn_attrs(def_id).flags.contains(CodegenFnAttrFlags::NO_DEBUG) { + return FunctionDebugContext::FunctionWithoutDebugInfo; + } + } + + let span = mir.span; + + // This can be the case for functions inlined from another crate + if span.is_dummy() { + // FIXME(simulacrum): Probably can't happen; remove. 
return FunctionDebugContext::FunctionWithoutDebugInfo; } - } - let span = mir.span; + let def_id = instance.def_id(); + let containing_scope = get_containing_scope(self, instance); + let loc = span_start(self, span); + let file_metadata = file_metadata(self, &loc.file.name, def_id.krate); - // This can be the case for functions inlined from another crate - if span.is_dummy() { - // FIXME(simulacrum): Probably can't happen; remove. - return FunctionDebugContext::FunctionWithoutDebugInfo; - } - - let def_id = instance.def_id(); - let containing_scope = get_containing_scope(cx, instance); - let loc = span_start(cx, span); - let file_metadata = file_metadata(cx, &loc.file.name, def_id.krate); - - let function_type_metadata = unsafe { - let fn_signature = get_function_signature(cx, sig); - llvm::LLVMRustDIBuilderCreateSubroutineType(DIB(cx), file_metadata, fn_signature) - }; - - // Find the enclosing function, in case this is a closure. - let def_key = cx.tcx.def_key(def_id); - let mut name = def_key.disambiguated_data.data.to_string(); - - let enclosing_fn_def_id = cx.tcx.closure_base_def_id(def_id); - - // Get_template_parameters() will append a `<...>` clause to the function - // name if necessary. - let generics = cx.tcx.generics_of(enclosing_fn_def_id); - let substs = instance.substs.truncate_to(cx.tcx, generics); - let template_parameters = get_template_parameters(cx, - &generics, - substs, - file_metadata, - &mut name); - - // Get the linkage_name, which is just the symbol name - let linkage_name = mangled_name_of_instance(cx, instance); - - let scope_line = span_start(cx, span).line; - let is_local_to_unit = is_node_local_to_unit(cx, def_id); - - let function_name = CString::new(name).unwrap(); - let linkage_name = SmallCStr::new(&linkage_name.as_str()); - - let mut flags = DIFlags::FlagPrototyped; - - let local_id = cx.tcx.hir.as_local_node_id(def_id); - if let Some((id, _, _)) = *cx.sess().entry_fn.borrow() { - if local_id == Some(id) { - flags |= DIFlags::FlagMainSubprogram; - } - } - - if cx.layout_of(sig.output()).abi.is_uninhabited() { - flags |= DIFlags::FlagNoReturn; - } - - let fn_metadata = unsafe { - llvm::LLVMRustDIBuilderCreateFunction( - DIB(cx), - containing_scope, - function_name.as_ptr(), - linkage_name.as_ptr(), - file_metadata, - loc.line as c_uint, - function_type_metadata, - is_local_to_unit, - true, - scope_line as c_uint, - flags, - cx.sess().opts.optimize != config::OptLevel::No, - llfn, - template_parameters, - None) - }; - - // Initialize fn debug context (including scope map and namespace map) - let fn_debug_context = FunctionDebugContextData { - fn_metadata, - source_locations_enabled: Cell::new(false), - defining_crate: def_id.krate, - }; - - return FunctionDebugContext::RegularContext(fn_debug_context); - - fn get_function_signature( - cx: &CodegenCx<'ll, 'tcx>, - sig: ty::FnSig<'tcx>, - ) -> &'ll DIArray { - if cx.sess().opts.debuginfo == DebugInfo::Limited { - return create_DIArray(DIB(cx), &[]); - } - - let mut signature = Vec::with_capacity(sig.inputs().len() + 1); - - // Return type -- llvm::DIBuilder wants this at index 0 - signature.push(match sig.output().sty { - ty::Tuple(ref tys) if tys.is_empty() => None, - _ => Some(type_metadata(cx, sig.output(), syntax_pos::DUMMY_SP)) - }); - - let inputs = if sig.abi == Abi::RustCall { - &sig.inputs()[..sig.inputs().len() - 1] - } else { - sig.inputs() + let function_type_metadata = unsafe { + let fn_signature = get_function_signature(self, sig); + llvm::LLVMRustDIBuilderCreateSubroutineType(DIB(self), 
file_metadata, fn_signature) }; - // Arguments types - if cx.sess().target.target.options.is_like_msvc { - // FIXME(#42800): - // There is a bug in MSDIA that leads to a crash when it encounters - // a fixed-size array of `u8` or something zero-sized in a - // function-type (see #40477). - // As a workaround, we replace those fixed-size arrays with a - // pointer-type. So a function `fn foo(a: u8, b: [u8; 4])` would - // appear as `fn foo(a: u8, b: *const u8)` in debuginfo, - // and a function `fn bar(x: [(); 7])` as `fn bar(x: *const ())`. - // This transformed type is wrong, but these function types are - // already inaccurate due to ABI adjustments (see #42800). - signature.extend(inputs.iter().map(|&t| { - let t = match t.sty { - ty::Array(ct, _) - if (ct == cx.tcx.types.u8) || cx.layout_of(ct).is_zst() => { - cx.tcx.mk_imm_ptr(ct) + // Find the enclosing function, in case this is a closure. + let def_key = self.tcx().def_key(def_id); + let mut name = def_key.disambiguated_data.data.to_string(); + + let enclosing_fn_def_id = self.tcx().closure_base_def_id(def_id); + + // Get_template_parameters() will append a `<...>` clause to the function + // name if necessary. + let generics = self.tcx().generics_of(enclosing_fn_def_id); + let substs = instance.substs.truncate_to(self.tcx(), generics); + let template_parameters = get_template_parameters(self, + &generics, + substs, + file_metadata, + &mut name); + + // Get the linkage_name, which is just the symbol name + let linkage_name = mangled_name_of_instance(self, instance); + + let scope_line = span_start(self, span).line; + let is_local_to_unit = is_node_local_to_unit(self, def_id); + + let function_name = CString::new(name).unwrap(); + let linkage_name = SmallCStr::new(&linkage_name.as_str()); + + let mut flags = DIFlags::FlagPrototyped; + + let local_id = self.tcx().hir.as_local_node_id(def_id); + if let Some((id, _, _)) = *self.sess().entry_fn.borrow() { + if local_id == Some(id) { + flags |= DIFlags::FlagMainSubprogram; + } + } + + if self.layout_of(sig.output()).abi.is_uninhabited() { + flags |= DIFlags::FlagNoReturn; + } + + let fn_metadata = unsafe { + llvm::LLVMRustDIBuilderCreateFunction( + DIB(self), + containing_scope, + function_name.as_ptr(), + linkage_name.as_ptr(), + file_metadata, + loc.line as c_uint, + function_type_metadata, + is_local_to_unit, + true, + scope_line as c_uint, + flags, + self.sess().opts.optimize != config::OptLevel::No, + llfn, + template_parameters, + None) + }; + + // Initialize fn debug context (including scope map and namespace map) + let fn_debug_context = FunctionDebugContextData { + fn_metadata, + source_locations_enabled: Cell::new(false), + defining_crate: def_id.krate, + }; + + return FunctionDebugContext::RegularContext(fn_debug_context); + + fn get_function_signature<'ll, 'tcx>( + cx: &CodegenCx<'ll, 'tcx>, + sig: ty::FnSig<'tcx>, + ) -> &'ll DIArray { + if cx.sess().opts.debuginfo == DebugInfo::Limited { + return create_DIArray(DIB(cx), &[]); + } + + let mut signature = Vec::with_capacity(sig.inputs().len() + 1); + + // Return type -- llvm::DIBuilder wants this at index 0 + signature.push(match sig.output().sty { + ty::Tuple(ref tys) if tys.is_empty() => None, + _ => Some(type_metadata(cx, sig.output(), syntax_pos::DUMMY_SP)) + }); + + let inputs = if sig.abi == Abi::RustCall { + &sig.inputs()[..sig.inputs().len() - 1] + } else { + sig.inputs() + }; + + // Arguments types + if cx.sess().target.target.options.is_like_msvc { + // FIXME(#42800): + // There is a bug in MSDIA that leads to a 
crash when it encounters + // a fixed-size array of `u8` or something zero-sized in a + // function-type (see #40477). + // As a workaround, we replace those fixed-size arrays with a + // pointer-type. So a function `fn foo(a: u8, b: [u8; 4])` would + // appear as `fn foo(a: u8, b: *const u8)` in debuginfo, + // and a function `fn bar(x: [(); 7])` as `fn bar(x: *const ())`. + // This transformed type is wrong, but these function types are + // already inaccurate due to ABI adjustments (see #42800). + signature.extend(inputs.iter().map(|&t| { + let t = match t.sty { + ty::Array(ct, _) + if (ct == cx.tcx.types.u8) || cx.layout_of(ct).is_zst() => { + cx.tcx.mk_imm_ptr(ct) + } + _ => t + }; + Some(type_metadata(cx, t, syntax_pos::DUMMY_SP)) + })); + } else { + signature.extend(inputs.iter().map(|t| { + Some(type_metadata(cx, t, syntax_pos::DUMMY_SP)) + })); + } + + if sig.abi == Abi::RustCall && !sig.inputs().is_empty() { + if let ty::Tuple(args) = sig.inputs()[sig.inputs().len() - 1].sty { + signature.extend( + args.iter().map(|argument_type| { + Some(type_metadata(cx, argument_type, syntax_pos::DUMMY_SP)) + }) + ); + } + } + + create_DIArray(DIB(cx), &signature[..]) + } + + fn get_template_parameters<'ll, 'tcx>( + cx: &CodegenCx<'ll, 'tcx>, + generics: &ty::Generics, + substs: &Substs<'tcx>, + file_metadata: &'ll DIFile, + name_to_append_suffix_to: &mut String, + ) -> &'ll DIArray { + if substs.types().next().is_none() { + return create_DIArray(DIB(cx), &[]); + } + + name_to_append_suffix_to.push('<'); + for (i, actual_type) in substs.types().enumerate() { + if i != 0 { + name_to_append_suffix_to.push_str(","); + } + + let actual_type = + cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), actual_type); + // Add actual type name to <...> clause of function name + let actual_type_name = compute_debuginfo_type_name(cx, + actual_type, + true); + name_to_append_suffix_to.push_str(&actual_type_name[..]); + } + name_to_append_suffix_to.push('>'); + + // Again, only create type information if full debuginfo is enabled + let template_params: Vec<_> = if cx.sess().opts.debuginfo == DebugInfo::Full { + let names = get_parameter_names(cx, generics); + substs.iter().zip(names).filter_map(|(kind, name)| { + if let UnpackedKind::Type(ty) = kind.unpack() { + let actual_type = + cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), ty); + let actual_type_metadata = + type_metadata(cx, actual_type, syntax_pos::DUMMY_SP); + let name = SmallCStr::new(&name.as_str()); + Some(unsafe { + Some(llvm::LLVMRustDIBuilderCreateTemplateTypeParameter( + DIB(cx), + None, + name.as_ptr(), + actual_type_metadata, + file_metadata, + 0, + 0, + )) + }) + } else { + None } - _ => t - }; - Some(type_metadata(cx, t, syntax_pos::DUMMY_SP)) - })); - } else { - signature.extend(inputs.iter().map(|t| { - Some(type_metadata(cx, t, syntax_pos::DUMMY_SP)) - })); + }).collect() + } else { + vec![] + }; + + return create_DIArray(DIB(cx), &template_params[..]); } - if sig.abi == Abi::RustCall && !sig.inputs().is_empty() { - if let ty::Tuple(args) = sig.inputs()[sig.inputs().len() - 1].sty { - signature.extend( - args.iter().map(|argument_type| { - Some(type_metadata(cx, argument_type, syntax_pos::DUMMY_SP)) - }) - ); - } + fn get_parameter_names(cx: &CodegenCx, + generics: &ty::Generics) + -> Vec { + let mut names = generics.parent.map_or(vec![], |def_id| { + get_parameter_names(cx, cx.tcx.generics_of(def_id)) + }); + names.extend(generics.params.iter().map(|param| param.name)); + names } - create_DIArray(DIB(cx), &signature[..]) - 
} + fn get_containing_scope<'ll, 'tcx>( + cx: &CodegenCx<'ll, 'tcx>, + instance: Instance<'tcx>, + ) -> &'ll DIScope { + // First, let's see if this is a method within an inherent impl. Because + // if yes, we want to make the result subroutine DIE a child of the + // subroutine's self-type. + let self_type = cx.tcx.impl_of_method(instance.def_id()).and_then(|impl_def_id| { + // If the method does *not* belong to a trait, proceed + if cx.tcx.trait_id_of_impl(impl_def_id).is_none() { + let impl_self_ty = cx.tcx.subst_and_normalize_erasing_regions( + instance.substs, + ty::ParamEnv::reveal_all(), + &cx.tcx.type_of(impl_def_id), + ); - fn get_template_parameters( - cx: &CodegenCx<'ll, 'tcx>, - generics: &ty::Generics, - substs: &Substs<'tcx>, - file_metadata: &'ll DIFile, - name_to_append_suffix_to: &mut String, - ) -> &'ll DIArray { - if substs.types().next().is_none() { - return create_DIArray(DIB(cx), &[]); - } - - name_to_append_suffix_to.push('<'); - for (i, actual_type) in substs.types().enumerate() { - if i != 0 { - name_to_append_suffix_to.push_str(","); - } - - let actual_type = cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), actual_type); - // Add actual type name to <...> clause of function name - let actual_type_name = compute_debuginfo_type_name(cx, - actual_type, - true); - name_to_append_suffix_to.push_str(&actual_type_name[..]); - } - name_to_append_suffix_to.push('>'); - - // Again, only create type information if full debuginfo is enabled - let template_params: Vec<_> = if cx.sess().opts.debuginfo == DebugInfo::Full { - let names = get_parameter_names(cx, generics); - substs.iter().zip(names).filter_map(|(kind, name)| { - if let UnpackedKind::Type(ty) = kind.unpack() { - let actual_type = cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), ty); - let actual_type_metadata = - type_metadata(cx, actual_type, syntax_pos::DUMMY_SP); - let name = SmallCStr::new(&name.as_str()); - Some(unsafe { - Some(llvm::LLVMRustDIBuilderCreateTemplateTypeParameter( - DIB(cx), - None, - name.as_ptr(), - actual_type_metadata, - file_metadata, - 0, - 0, - )) - }) + // Only "class" methods are generally understood by LLVM, + // so avoid methods on other types (e.g. `<*mut T>::null`). + match impl_self_ty.sty { + ty::Adt(def, ..) if !def.is_box() => { + Some(type_metadata(cx, impl_self_ty, syntax_pos::DUMMY_SP)) + } + _ => None + } } else { + // For trait method impls we still use the "parallel namespace" + // strategy None } - }).collect() - } else { - vec![] - }; + }); - create_DIArray(DIB(cx), &template_params[..]) - } - - fn get_parameter_names(cx: &CodegenCx, - generics: &ty::Generics) - -> Vec { - let mut names = generics.parent.map_or(vec![], |def_id| { - get_parameter_names(cx, cx.tcx.generics_of(def_id)) - }); - names.extend(generics.params.iter().map(|param| param.name)); - names - } - - fn get_containing_scope( - cx: &CodegenCx<'ll, 'tcx>, - instance: Instance<'tcx>, - ) -> &'ll DIScope { - // First, let's see if this is a method within an inherent impl. Because - // if yes, we want to make the result subroutine DIE a child of the - // subroutine's self-type. 
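As the comments in `get_containing_scope` say, only methods from inherent impls are made children of their self-type's DIE; trait impl methods keep the parallel-namespace scheme, and non-ADT or `Box` self types are skipped. At the source level the distinction is simply this (illustrative types, not compiler code):

```rust
struct Point {
    x: f64,
    y: f64,
}

// Inherent impl: `norm` can be emitted as a child of `Point`'s type DIE.
impl Point {
    fn norm(&self) -> f64 {
        (self.x * self.x + self.y * self.y).sqrt()
    }
}

// Trait impl: `fmt` stays under the enclosing namespace instead.
impl std::fmt::Display for Point {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "({}, {})", self.x, self.y)
    }
}

fn main() {
    let p = Point { x: 3.0, y: 4.0 };
    println!("{} has norm {}", p, p.norm());
}
```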
- let self_type = cx.tcx.impl_of_method(instance.def_id()).and_then(|impl_def_id| { - // If the method does *not* belong to a trait, proceed - if cx.tcx.trait_id_of_impl(impl_def_id).is_none() { - let impl_self_ty = cx.tcx.subst_and_normalize_erasing_regions( - instance.substs, - ty::ParamEnv::reveal_all(), - &cx.tcx.type_of(impl_def_id), - ); - - // Only "class" methods are generally understood by LLVM, - // so avoid methods on other types (e.g. `<*mut T>::null`). - match impl_self_ty.sty { - ty::Adt(def, ..) if !def.is_box() => { - Some(type_metadata(cx, impl_self_ty, syntax_pos::DUMMY_SP)) - } - _ => None - } - } else { - // For trait method impls we still use the "parallel namespace" - // strategy - None - } - }); - - self_type.unwrap_or_else(|| { - namespace::item_namespace(cx, DefId { - krate: instance.def_id().krate, - index: cx.tcx - .def_key(instance.def_id()) - .parent - .expect("get_containing_scope: missing parent?") + self_type.unwrap_or_else(|| { + namespace::item_namespace(cx, DefId { + krate: instance.def_id().krate, + index: cx.tcx + .def_key(instance.def_id()) + .parent + .expect("get_containing_scope: missing parent?") + }) }) - }) + } } -} -pub fn declare_local( - bx: &Builder<'a, 'll, 'tcx>, - dbg_context: &FunctionDebugContext<'ll>, - variable_name: ast::Name, - variable_type: Ty<'tcx>, - scope_metadata: &'ll DIScope, - variable_access: VariableAccess<'_, 'll>, - variable_kind: VariableKind, - span: Span, -) { - assert!(!dbg_context.get_ref(span).source_locations_enabled.get()); - let cx = bx.cx; + fn create_vtable_metadata( + &self, + ty: Ty<'tcx>, + vtable: Self::Value, + ) { + metadata::create_vtable_metadata(self, ty, vtable) + } - let file = span_start(cx, span).file; - let file_metadata = file_metadata(cx, - &file.name, - dbg_context.get_ref(span).defining_crate); + fn create_mir_scopes( + &self, + mir: &mir::Mir, + debug_context: &FunctionDebugContext<&'ll DISubprogram>, + ) -> IndexVec> { + create_scope_map::create_mir_scopes(self, mir, debug_context) + } - let loc = span_start(cx, span); - let type_metadata = type_metadata(cx, variable_type, span); + fn extend_scope_to_file( + &self, + scope_metadata: &'ll DIScope, + file: &syntax_pos::SourceFile, + defining_crate: CrateNum, + ) -> &'ll DILexicalBlock { + metadata::extend_scope_to_file(&self, scope_metadata, file, defining_crate) + } - let (argument_index, dwarf_tag) = match variable_kind { - ArgumentVariable(index) => (index as c_uint, DW_TAG_arg_variable), - LocalVariable => (0, DW_TAG_auto_variable) - }; - let align = cx.align_of(variable_type); + fn debuginfo_finalize(&self) { + finalize(self) + } - let name = SmallCStr::new(&variable_name.as_str()); - match (variable_access, &[][..]) { - (DirectVariable { alloca }, address_operations) | - (IndirectVariable {alloca, address_operations}, _) => { - let metadata = unsafe { - llvm::LLVMRustDIBuilderCreateVariable( - DIB(cx), - dwarf_tag, - scope_metadata, - name.as_ptr(), - file_metadata, - loc.line as c_uint, - type_metadata, - cx.sess().opts.optimize != config::OptLevel::No, - DIFlags::FlagZero, - argument_index, - align.abi() as u32, - ) - }; - source_loc::set_debug_location(bx, - InternalDebugLocation::new(scope_metadata, loc.line, loc.col.to_usize())); - unsafe { - let debug_loc = llvm::LLVMGetCurrentDebugLocation(bx.llbuilder); - let instr = llvm::LLVMRustDIBuilderInsertDeclareAtEnd( - DIB(cx), - alloca, - metadata, - address_operations.as_ptr(), - address_operations.len() as c_uint, - debug_loc, - bx.llbb()); - - 
llvm::LLVMSetInstDebugLocation(bx.llbuilder, instr); - } - source_loc::set_debug_location(bx, UnknownLocation); + fn debuginfo_upvar_decls_ops_sequence(&self, byte_offset_of_var_in_env: u64) -> [i64; 4] { + unsafe { + [llvm::LLVMRustDIBuilderCreateOpDeref(), + llvm::LLVMRustDIBuilderCreateOpPlusUconst(), + byte_offset_of_var_in_env as i64, + llvm::LLVMRustDIBuilderCreateOpDeref()] } } } diff --git a/src/librustc_codegen_llvm/debuginfo/source_loc.rs b/src/librustc_codegen_llvm/debuginfo/source_loc.rs index 60ebcb8881..95196287ab 100644 --- a/src/librustc_codegen_llvm/debuginfo/source_loc.rs +++ b/src/librustc_codegen_llvm/debuginfo/source_loc.rs @@ -12,11 +12,12 @@ use self::InternalDebugLocation::*; use super::utils::{debug_context, span_start}; use super::metadata::UNKNOWN_COLUMN_NUMBER; -use super::FunctionDebugContext; +use rustc_codegen_ssa::debuginfo::FunctionDebugContext; use llvm; use llvm::debuginfo::DIScope; use builder::Builder; +use rustc_codegen_ssa::traits::*; use libc::c_uint; use syntax_pos::{Span, Pos}; @@ -24,8 +25,8 @@ use syntax_pos::{Span, Pos}; /// Sets the current debug location at the beginning of the span. /// /// Maps to a call to llvm::LLVMSetCurrentDebugLocation(...). -pub fn set_source_location( - debug_context: &FunctionDebugContext<'ll>, +pub fn set_source_location( + debug_context: &FunctionDebugContext, bx: &Builder<'_, 'll, '_>, scope: Option<&'ll DIScope>, span: Span, @@ -41,7 +42,7 @@ pub fn set_source_location( let dbg_loc = if function_debug_context.source_locations_enabled.get() { debug!("set_source_location: {}", bx.sess().source_map().span_to_string(span)); - let loc = span_start(bx.cx, span); + let loc = span_start(bx.cx(), span); InternalDebugLocation::new(scope.unwrap(), loc.line, loc.col.to_usize()) } else { UnknownLocation @@ -49,18 +50,6 @@ pub fn set_source_location( set_debug_location(bx, dbg_loc); } -/// Enables emitting source locations for the given functions. -/// -/// Since we don't want source locations to be emitted for the function prelude, -/// they are disabled when beginning to codegen a new function. This functions -/// switches source location emitting on and must therefore be called before the -/// first real statement/expression of the function is codegened. -pub fn start_emitting_source_locations(dbg_context: &FunctionDebugContext<'ll>) { - if let FunctionDebugContext::RegularContext(ref data) = *dbg_context { - data.source_locations_enabled.set(true); - } -} - #[derive(Copy, Clone, PartialEq)] pub enum InternalDebugLocation<'ll> { @@ -78,13 +67,16 @@ impl InternalDebugLocation<'ll> { } } -pub fn set_debug_location(bx: &Builder<'_, 'll, '_>, debug_location: InternalDebugLocation<'ll>) { +pub fn set_debug_location( + bx: &Builder<'_, 'll, '_>, + debug_location: InternalDebugLocation<'ll> +) { let metadata_node = match debug_location { KnownLocation { scope, line, col } => { // For MSVC, set the column number to zero. // Otherwise, emit it. This mimics clang behaviour. 
// See discussion in https://github.com/rust-lang/rust/issues/42921 - let col_used = if bx.cx.sess().target.target.options.is_like_msvc { + let col_used = if bx.sess().target.target.options.is_like_msvc { UNKNOWN_COLUMN_NUMBER } else { col as c_uint @@ -93,7 +85,7 @@ pub fn set_debug_location(bx: &Builder<'_, 'll, '_>, debug_location: InternalDeb unsafe { Some(llvm::LLVMRustDIBuilderCreateDebugLocation( - debug_context(bx.cx).llcontext, + debug_context(bx.cx()).llcontext, line as c_uint, col_used, scope, diff --git a/src/librustc_codegen_llvm/debuginfo/type_names.rs b/src/librustc_codegen_llvm/debuginfo/type_names.rs index f5abb527e4..60545f9e19 100644 --- a/src/librustc_codegen_llvm/debuginfo/type_names.rs +++ b/src/librustc_codegen_llvm/debuginfo/type_names.rs @@ -14,6 +14,7 @@ use common::CodegenCx; use rustc::hir::def_id::DefId; use rustc::ty::subst::Substs; use rustc::ty::{self, Ty}; +use rustc_codegen_ssa::traits::*; use rustc::hir; @@ -171,8 +172,10 @@ pub fn push_debuginfo_type_name<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, } ty::Error | ty::Infer(_) | + ty::Placeholder(..) | ty::UnnormalizedProjection(..) | ty::Projection(..) | + ty::Bound(..) | ty::Opaque(..) | ty::GeneratorWitness(..) | ty::Param(_) => { diff --git a/src/librustc_codegen_llvm/debuginfo/utils.rs b/src/librustc_codegen_llvm/debuginfo/utils.rs index 19bc4ac39d..89262beb35 100644 --- a/src/librustc_codegen_llvm/debuginfo/utils.rs +++ b/src/librustc_codegen_llvm/debuginfo/utils.rs @@ -19,6 +19,7 @@ use rustc::ty::DefIdTree; use llvm; use llvm::debuginfo::{DIScope, DIBuilder, DIDescriptor, DIArray}; use common::{CodegenCx}; +use rustc_codegen_ssa::traits::*; use syntax_pos::{self, Span}; diff --git a/src/librustc_codegen_llvm/declare.rs b/src/librustc_codegen_llvm/declare.rs index 26969e24f0..c23aab409a 100644 --- a/src/librustc_codegen_llvm/declare.rs +++ b/src/librustc_codegen_llvm/declare.rs @@ -22,7 +22,7 @@ use llvm; use llvm::AttributePlace::Function; -use rustc::ty::{self, Ty}; +use rustc::ty::{self, PolyFnSig}; use rustc::ty::layout::LayoutOf; use rustc::session::config::Sanitizer; use rustc_data_structures::small_c_str::SmallCStr; @@ -30,24 +30,10 @@ use rustc_target::spec::PanicStrategy; use abi::{Abi, FnType, FnTypeExt}; use attributes; use context::CodegenCx; -use common; use type_::Type; +use rustc_codegen_ssa::traits::*; use value::Value; - -/// Declare a global value. -/// -/// If there’s a value with the same name already declared, the function will -/// return its Value instead. -pub fn declare_global(cx: &CodegenCx<'ll, '_>, name: &str, ty: &'ll Type) -> &'ll Value { - debug!("declare_global(name={:?})", name); - let namebuf = SmallCStr::new(name); - unsafe { - llvm::LLVMRustGetOrInsertGlobal(cx.llmod, namebuf.as_ptr(), ty) - } -} - - /// Declare a function. /// /// If there’s a value with the same name already declared, the function will @@ -109,124 +95,108 @@ fn declare_raw_fn( llfn } +impl DeclareMethods<'tcx> for CodegenCx<'ll, 'tcx> { -/// Declare a C ABI function. -/// -/// Only use this for foreign function ABIs and glue. For Rust functions use -/// `declare_fn` instead. -/// -/// If there’s a value with the same name already declared, the function will -/// update the declaration and return existing Value instead. -pub fn declare_cfn(cx: &CodegenCx<'ll, '_>, name: &str, fn_type: &'ll Type) -> &'ll Value { - declare_raw_fn(cx, name, llvm::CCallConv, fn_type) -} - - -/// Declare a Rust function. 
-/// -/// If there’s a value with the same name already declared, the function will -/// update the declaration and return existing Value instead. -pub fn declare_fn( - cx: &CodegenCx<'ll, 'tcx>, - name: &str, - fn_type: Ty<'tcx>, -) -> &'ll Value { - debug!("declare_rust_fn(name={:?}, fn_type={:?})", name, fn_type); - let sig = common::ty_fn_sig(cx, fn_type); - let sig = cx.tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig); - debug!("declare_rust_fn (after region erasure) sig={:?}", sig); - - let fty = FnType::new(cx, sig, &[]); - let llfn = declare_raw_fn(cx, name, fty.llvm_cconv(), fty.llvm_type(cx)); - - if cx.layout_of(sig.output()).abi.is_uninhabited() { - llvm::Attribute::NoReturn.apply_llfn(Function, llfn); - } - - if sig.abi != Abi::Rust && sig.abi != Abi::RustCall { - attributes::unwind(llfn, false); - } - - fty.apply_attrs_llfn(llfn); - - llfn -} - - -/// Declare a global with an intention to define it. -/// -/// Use this function when you intend to define a global. This function will -/// return None if the name already has a definition associated with it. In that -/// case an error should be reported to the user, because it usually happens due -/// to user’s fault (e.g. misuse of #[no_mangle] or #[export_name] attributes). -pub fn define_global(cx: &CodegenCx<'ll, '_>, name: &str, ty: &'ll Type) -> Option<&'ll Value> { - if get_defined_value(cx, name).is_some() { - None - } else { - Some(declare_global(cx, name, ty)) - } -} - -/// Declare a private global -/// -/// Use this function when you intend to define a global without a name. -pub fn define_private_global(cx: &CodegenCx<'ll, '_>, ty: &'ll Type) -> &'ll Value { - unsafe { - llvm::LLVMRustInsertPrivateGlobal(cx.llmod, ty) - } -} - -/// Declare a Rust function with an intention to define it. -/// -/// Use this function when you intend to define a function. This function will -/// return panic if the name already has a definition associated with it. This -/// can happen with #[no_mangle] or #[export_name], for example. -pub fn define_fn( - cx: &CodegenCx<'ll, 'tcx>, - name: &str, - fn_type: Ty<'tcx>, -) -> &'ll Value { - if get_defined_value(cx, name).is_some() { - cx.sess().fatal(&format!("symbol `{}` already defined", name)) - } else { - declare_fn(cx, name, fn_type) - } -} - -/// Declare a Rust function with an intention to define it. -/// -/// Use this function when you intend to define a function. This function will -/// return panic if the name already has a definition associated with it. This -/// can happen with #[no_mangle] or #[export_name], for example. -pub fn define_internal_fn( - cx: &CodegenCx<'ll, 'tcx>, - name: &str, - fn_type: Ty<'tcx>, -) -> &'ll Value { - let llfn = define_fn(cx, name, fn_type); - unsafe { llvm::LLVMRustSetLinkage(llfn, llvm::Linkage::InternalLinkage) }; - llfn -} - - -/// Get declared value by name. -pub fn get_declared_value(cx: &CodegenCx<'ll, '_>, name: &str) -> Option<&'ll Value> { - debug!("get_declared_value(name={:?})", name); - let namebuf = SmallCStr::new(name); - unsafe { llvm::LLVMRustGetNamedValue(cx.llmod, namebuf.as_ptr()) } -} - -/// Get defined or externally defined (AvailableExternally linkage) value by -/// name. 
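The surrounding hunk folds the free `declare_*`/`define_*`/`get_*` helpers into a `DeclareMethods` impl on `CodegenCx`, so backend-independent code that only knows the trait (brought in through `rustc_codegen_ssa::traits::*`) can declare and define symbols without naming LLVM. A rough, self-contained sketch of that shape; every name below is a simplified stand-in, not the real trait:

```rust
use std::cell::RefCell;
use std::collections::HashMap;

// Stand-in for the backend-agnostic trait: callers see only these methods,
// never the backend's module representation.
trait DeclareMethods {
    type Value: Clone;
    fn declare_global(&self, name: &str) -> Self::Value;
    fn get_defined_value(&self, name: &str) -> Option<Self::Value>;
    fn define_global(&self, name: &str) -> Option<Self::Value>;
}

// Toy "backend" that records symbols in a map instead of an LLVM module.
struct ToyCx {
    symbols: RefCell<HashMap<String, u32>>,
}

impl DeclareMethods for ToyCx {
    type Value = u32;

    fn declare_global(&self, name: &str) -> u32 {
        // Get-or-insert: re-declaring an existing name returns the same value.
        let mut syms = self.symbols.borrow_mut();
        let next = syms.len() as u32;
        *syms.entry(name.to_string()).or_insert(next)
    }

    fn get_defined_value(&self, name: &str) -> Option<u32> {
        self.symbols.borrow().get(name).cloned()
    }

    fn define_global(&self, name: &str) -> Option<u32> {
        // Mirrors the real logic: refuse to define a name that already exists.
        if self.get_defined_value(name).is_some() {
            None
        } else {
            Some(self.declare_global(name))
        }
    }
}

fn main() {
    let cx = ToyCx { symbols: RefCell::new(HashMap::new()) };
    assert!(cx.define_global("FOO").is_some());
    assert!(cx.define_global("FOO").is_none()); // already defined
}
```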
-pub fn get_defined_value(cx: &CodegenCx<'ll, '_>, name: &str) -> Option<&'ll Value> { - get_declared_value(cx, name).and_then(|val|{ - let declaration = unsafe { - llvm::LLVMIsDeclaration(val) != 0 - }; - if !declaration { - Some(val) - } else { - None + fn declare_global( + &self, + name: &str, ty: &'ll Type + ) -> &'ll Value { + debug!("declare_global(name={:?})", name); + let namebuf = SmallCStr::new(name); + unsafe { + llvm::LLVMRustGetOrInsertGlobal(self.llmod, namebuf.as_ptr(), ty) } - }) + } + + fn declare_cfn( + &self, + name: &str, + fn_type: &'ll Type + ) -> &'ll Value { + declare_raw_fn(self, name, llvm::CCallConv, fn_type) + } + + fn declare_fn( + &self, + name: &str, + sig: PolyFnSig<'tcx>, + ) -> &'ll Value { + debug!("declare_rust_fn(name={:?}, sig={:?})", name, sig); + let sig = self.tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig); + debug!("declare_rust_fn (after region erasure) sig={:?}", sig); + + let fty = FnType::new(self, sig, &[]); + let llfn = declare_raw_fn(self, name, fty.llvm_cconv(), fty.llvm_type(self)); + + if self.layout_of(sig.output()).abi.is_uninhabited() { + llvm::Attribute::NoReturn.apply_llfn(Function, llfn); + } + + if sig.abi != Abi::Rust && sig.abi != Abi::RustCall { + attributes::unwind(llfn, false); + } + + fty.apply_attrs_llfn(llfn); + + llfn + } + + fn define_global( + &self, + name: &str, + ty: &'ll Type + ) -> Option<&'ll Value> { + if self.get_defined_value(name).is_some() { + None + } else { + Some(self.declare_global(name, ty)) + } + } + + fn define_private_global(&self, ty: &'ll Type) -> &'ll Value { + unsafe { + llvm::LLVMRustInsertPrivateGlobal(self.llmod, ty) + } + } + + fn define_fn( + &self, + name: &str, + fn_sig: PolyFnSig<'tcx>, + ) -> &'ll Value { + if self.get_defined_value(name).is_some() { + self.sess().fatal(&format!("symbol `{}` already defined", name)) + } else { + self.declare_fn(name, fn_sig) + } + } + + fn define_internal_fn( + &self, + name: &str, + fn_sig: PolyFnSig<'tcx>, + ) -> &'ll Value { + let llfn = self.define_fn(name, fn_sig); + unsafe { llvm::LLVMRustSetLinkage(llfn, llvm::Linkage::InternalLinkage) }; + llfn + } + + fn get_declared_value(&self, name: &str) -> Option<&'ll Value> { + debug!("get_declared_value(name={:?})", name); + let namebuf = SmallCStr::new(name); + unsafe { llvm::LLVMRustGetNamedValue(self.llmod, namebuf.as_ptr()) } + } + + fn get_defined_value(&self, name: &str) -> Option<&'ll Value> { + self.get_declared_value(name).and_then(|val|{ + let declaration = unsafe { + llvm::LLVMIsDeclaration(val) != 0 + }; + if !declaration { + Some(val) + } else { + None + } + }) + } } diff --git a/src/librustc_codegen_llvm/diagnostics.rs b/src/librustc_codegen_llvm/diagnostics.rs index 5721938c9c..94776f17c7 100644 --- a/src/librustc_codegen_llvm/diagnostics.rs +++ b/src/librustc_codegen_llvm/diagnostics.rs @@ -47,37 +47,4 @@ unsafe { simd_add(i32x2(0, 0), i32x2(1, 2)); } // ok! ``` "##, -E0668: r##" -Malformed inline assembly rejected by LLVM. - -LLVM checks the validity of the constraints and the assembly string passed to -it. This error implies that LLVM seems something wrong with the inline -assembly call. 
- -In particular, it can happen if you forgot the closing bracket of a register -constraint (see issue #51430): -```ignore (error-emitted-at-codegen-which-cannot-be-handled-by-compile_fail) -#![feature(asm)] - -fn main() { - let rax: u64; - unsafe { - asm!("" :"={rax"(rax)); - println!("Accumulator is: {}", rax); - } -} -``` -"##, - -E0669: r##" -Cannot convert inline assembly operand to a single LLVM value. - -This error usually happens when trying to pass in a value to an input inline -assembly operand that is actually a pair of values. In particular, this can -happen when trying to pass in a slice, for instance a `&str`. In Rust, these -values are represented internally as a pair of values, the pointer and its -length. When passed as an input operand, this pair of values can not be -coerced into a register and thus we must fail with an error. -"##, - } diff --git a/src/librustc_codegen_llvm/intrinsic.rs b/src/librustc_codegen_llvm/intrinsic.rs index 272196afa6..313aa17510 100644 --- a/src/librustc_codegen_llvm/intrinsic.rs +++ b/src/librustc_codegen_llvm/intrinsic.rs @@ -12,23 +12,28 @@ use attributes; use intrinsics::{self, Intrinsic}; -use llvm::{self, TypeKind}; +use llvm; +use llvm_util; use abi::{Abi, FnType, LlvmType, PassMode}; -use mir::place::PlaceRef; -use mir::operand::{OperandRef, OperandValue}; -use base::*; -use common::*; -use declare; -use glue; +use rustc_codegen_ssa::MemFlags; +use rustc_codegen_ssa::mir::place::PlaceRef; +use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue}; +use rustc_codegen_ssa::glue; +use rustc_codegen_ssa::base::{to_immediate, wants_msvc_seh, compare_simd_types}; +use context::CodegenCx; use type_::Type; use type_of::LayoutLlvmExt; use rustc::ty::{self, Ty}; -use rustc::ty::layout::{HasDataLayout, LayoutOf}; +use rustc::ty::layout::{self, LayoutOf, HasTyCtxt, Primitive}; +use rustc_codegen_ssa::common::TypeKind; use rustc::hir; -use syntax::ast; +use syntax::ast::{self, FloatTy}; use syntax::symbol::Symbol; use builder::Builder; use value::Value; +use va_arg::emit_va_arg; + +use rustc_codegen_ssa::traits::*; use rustc::session::Session; use syntax_pos::Span; @@ -83,656 +88,795 @@ fn get_simple_intrinsic(cx: &CodegenCx<'ll, '_>, name: &str) -> Option<&'ll Valu Some(cx.get_intrinsic(&llvm_name)) } -/// Remember to add all intrinsics here, in librustc_typeck/check/mod.rs, -/// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics, -/// add them to librustc_codegen_llvm/context.rs -pub fn codegen_intrinsic_call( - bx: &Builder<'a, 'll, 'tcx>, - callee_ty: Ty<'tcx>, - fn_ty: &FnType<'tcx, Ty<'tcx>>, - args: &[OperandRef<'ll, 'tcx>], - llresult: &'ll Value, - span: Span, -) { - let cx = bx.cx; - let tcx = cx.tcx; +impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { + fn codegen_intrinsic_call( + &mut self, + callee_ty: Ty<'tcx>, + fn_ty: &FnType<'tcx, Ty<'tcx>>, + args: &[OperandRef<'tcx, &'ll Value>], + llresult: &'ll Value, + span: Span, + ) { + let tcx = self.tcx; - let (def_id, substs) = match callee_ty.sty { - ty::FnDef(def_id, substs) => (def_id, substs), - _ => bug!("expected fn item type, found {}", callee_ty) - }; + let (def_id, substs) = match callee_ty.sty { + ty::FnDef(def_id, substs) => (def_id, substs), + _ => bug!("expected fn item type, found {}", callee_ty) + }; - let sig = callee_ty.fn_sig(tcx); - let sig = tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig); - let arg_tys = sig.inputs(); - let ret_ty = sig.output(); - let name = &*tcx.item_name(def_id).as_str(); + let sig = 
callee_ty.fn_sig(tcx); + let sig = tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig); + let arg_tys = sig.inputs(); + let ret_ty = sig.output(); + let name = &*tcx.item_name(def_id).as_str(); - let llret_ty = cx.layout_of(ret_ty).llvm_type(cx); - let result = PlaceRef::new_sized(llresult, fn_ty.ret.layout, fn_ty.ret.layout.align); + let llret_ty = self.layout_of(ret_ty).llvm_type(self); + let result = PlaceRef::new_sized(llresult, fn_ty.ret.layout, fn_ty.ret.layout.align.abi); - let simple = get_simple_intrinsic(cx, name); - let llval = match name { - _ if simple.is_some() => { - bx.call(simple.unwrap(), - &args.iter().map(|arg| arg.immediate()).collect::>(), - None) - } - "unreachable" => { - return; - }, - "likely" => { - let expect = cx.get_intrinsic(&("llvm.expect.i1")); - bx.call(expect, &[args[0].immediate(), C_bool(cx, true)], None) - } - "unlikely" => { - let expect = cx.get_intrinsic(&("llvm.expect.i1")); - bx.call(expect, &[args[0].immediate(), C_bool(cx, false)], None) - } - "try" => { - try_intrinsic(bx, cx, - args[0].immediate(), - args[1].immediate(), - args[2].immediate(), - llresult); - return; - } - "breakpoint" => { - let llfn = cx.get_intrinsic(&("llvm.debugtrap")); - bx.call(llfn, &[], None) - } - "size_of" => { - let tp_ty = substs.type_at(0); - C_usize(cx, cx.size_of(tp_ty).bytes()) - } - "size_of_val" => { - let tp_ty = substs.type_at(0); - if let OperandValue::Pair(_, meta) = args[0].val { - let (llsize, _) = - glue::size_and_align_of_dst(bx, tp_ty, Some(meta)); - llsize - } else { - C_usize(cx, cx.size_of(tp_ty).bytes()) + let simple = get_simple_intrinsic(self, name); + let llval = match name { + _ if simple.is_some() => { + self.call(simple.unwrap(), + &args.iter().map(|arg| arg.immediate()).collect::>(), + None) } - } - "min_align_of" => { - let tp_ty = substs.type_at(0); - C_usize(cx, cx.align_of(tp_ty).abi()) - } - "min_align_of_val" => { - let tp_ty = substs.type_at(0); - if let OperandValue::Pair(_, meta) = args[0].val { - let (_, llalign) = - glue::size_and_align_of_dst(bx, tp_ty, Some(meta)); - llalign - } else { - C_usize(cx, cx.align_of(tp_ty).abi()) + "unreachable" => { + return; + }, + "likely" => { + let expect = self.get_intrinsic(&("llvm.expect.i1")); + self.call(expect, &[args[0].immediate(), self.const_bool(true)], None) } - } - "pref_align_of" => { - let tp_ty = substs.type_at(0); - C_usize(cx, cx.align_of(tp_ty).pref()) - } - "type_name" => { - let tp_ty = substs.type_at(0); - let ty_name = Symbol::intern(&tp_ty.to_string()).as_str(); - C_str_slice(cx, ty_name) - } - "type_id" => { - C_u64(cx, cx.tcx.type_id_hash(substs.type_at(0))) - } - "init" => { - let ty = substs.type_at(0); - if !cx.layout_of(ty).is_zst() { - // Just zero out the stack slot. - // If we store a zero constant, LLVM will drown in vreg allocation for large data - // structures, and the generated code will be awful. (A telltale sign of this is - // large quantities of `mov [byte ptr foo],0` in the generated code.) 
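The comment above explains why `init` is lowered to a single `memset` rather than a zero-constant store. The closest user-facing counterpart of that zero-initialization is `std::mem::zeroed`; a tiny illustration of the pattern the comment is worried about (nothing compiler-internal, and sound here because all-zero bits are valid for integer arrays):

```rust
fn main() {
    // Zero-initializing a large aggregate: codegen emits one memset instead of
    // storing a huge zero constant field by field.
    let big: [u64; 1024] = unsafe { std::mem::zeroed() };
    assert!(big.iter().all(|&word| word == 0));
}
```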
- memset_intrinsic(bx, false, ty, llresult, C_u8(cx, 0), C_usize(cx, 1)); + "unlikely" => { + let expect = self.get_intrinsic(&("llvm.expect.i1")); + self.call(expect, &[args[0].immediate(), self.const_bool(false)], None) } - return; - } - // Effectively no-ops - "uninit" => { - return; - } - "needs_drop" => { - let tp_ty = substs.type_at(0); - - C_bool(cx, bx.cx.type_needs_drop(tp_ty)) - } - "offset" => { - let ptr = args[0].immediate(); - let offset = args[1].immediate(); - bx.inbounds_gep(ptr, &[offset]) - } - "arith_offset" => { - let ptr = args[0].immediate(); - let offset = args[1].immediate(); - bx.gep(ptr, &[offset]) - } - - "copy_nonoverlapping" => { - copy_intrinsic(bx, false, false, substs.type_at(0), - args[1].immediate(), args[0].immediate(), args[2].immediate()) - } - "copy" => { - copy_intrinsic(bx, true, false, substs.type_at(0), - args[1].immediate(), args[0].immediate(), args[2].immediate()) - } - "write_bytes" => { - memset_intrinsic(bx, false, substs.type_at(0), - args[0].immediate(), args[1].immediate(), args[2].immediate()) - } - - "volatile_copy_nonoverlapping_memory" => { - copy_intrinsic(bx, false, true, substs.type_at(0), - args[0].immediate(), args[1].immediate(), args[2].immediate()) - } - "volatile_copy_memory" => { - copy_intrinsic(bx, true, true, substs.type_at(0), - args[0].immediate(), args[1].immediate(), args[2].immediate()) - } - "volatile_set_memory" => { - memset_intrinsic(bx, true, substs.type_at(0), - args[0].immediate(), args[1].immediate(), args[2].immediate()) - } - "volatile_load" | "unaligned_volatile_load" => { - let tp_ty = substs.type_at(0); - let mut ptr = args[0].immediate(); - if let PassMode::Cast(ty) = fn_ty.ret.mode { - ptr = bx.pointercast(ptr, ty.llvm_type(cx).ptr_to()); + "try" => { + try_intrinsic(self, + args[0].immediate(), + args[1].immediate(), + args[2].immediate(), + llresult); + return; } - let load = bx.volatile_load(ptr); - let align = if name == "unaligned_volatile_load" { - 1 - } else { - cx.align_of(tp_ty).abi() as u32 - }; - unsafe { - llvm::LLVMSetAlignment(load, align); + "breakpoint" => { + let llfn = self.get_intrinsic(&("llvm.debugtrap")); + self.call(llfn, &[], None) } - to_immediate(bx, load, cx.layout_of(tp_ty)) - }, - "volatile_store" => { - let dst = args[0].deref(bx.cx); - args[1].val.volatile_store(bx, dst); - return; - }, - "unaligned_volatile_store" => { - let dst = args[0].deref(bx.cx); - args[1].val.unaligned_volatile_store(bx, dst); - return; - }, - "prefetch_read_data" | "prefetch_write_data" | - "prefetch_read_instruction" | "prefetch_write_instruction" => { - let expect = cx.get_intrinsic(&("llvm.prefetch")); - let (rw, cache_type) = match name { - "prefetch_read_data" => (0, 1), - "prefetch_write_data" => (1, 1), - "prefetch_read_instruction" => (0, 0), - "prefetch_write_instruction" => (1, 0), - _ => bug!() - }; - bx.call(expect, &[ - args[0].immediate(), - C_i32(cx, rw), - args[1].immediate(), - C_i32(cx, cache_type) - ], None) - }, - "ctlz" | "ctlz_nonzero" | "cttz" | "cttz_nonzero" | "ctpop" | "bswap" | - "bitreverse" | "add_with_overflow" | "sub_with_overflow" | - "mul_with_overflow" | "overflowing_add" | "overflowing_sub" | "overflowing_mul" | - "unchecked_div" | "unchecked_rem" | "unchecked_shl" | "unchecked_shr" | "exact_div" => { - let ty = arg_tys[0]; - match int_type_width_signed(ty, cx) { - Some((width, signed)) => - match name { - "ctlz" | "cttz" => { - let y = C_bool(bx.cx, false); - let llfn = cx.get_intrinsic(&format!("llvm.{}.i{}", name, width)); - bx.call(llfn, 
&[args[0].immediate(), y], None) - } - "ctlz_nonzero" | "cttz_nonzero" => { - let y = C_bool(bx.cx, true); - let llvm_name = &format!("llvm.{}.i{}", &name[..4], width); - let llfn = cx.get_intrinsic(llvm_name); - bx.call(llfn, &[args[0].immediate(), y], None) - } - "ctpop" => bx.call(cx.get_intrinsic(&format!("llvm.ctpop.i{}", width)), - &[args[0].immediate()], None), - "bswap" => { - if width == 8 { - args[0].immediate() // byte swap a u8/i8 is just a no-op - } else { - bx.call(cx.get_intrinsic(&format!("llvm.bswap.i{}", width)), - &[args[0].immediate()], None) + "size_of" => { + let tp_ty = substs.type_at(0); + self.const_usize(self.size_of(tp_ty).bytes()) + } + func @ "va_start" | func @ "va_end" => { + let va_list = match (tcx.lang_items().va_list(), &result.layout.ty.sty) { + (Some(did), ty::Adt(def, _)) if def.did == did => args[0].immediate(), + (Some(_), _) => self.load(args[0].immediate(), + tcx.data_layout.pointer_align.abi), + (None, _) => bug!("va_list language item must be defined") + }; + let intrinsic = self.cx().get_intrinsic(&format!("llvm.{}", func)); + self.call(intrinsic, &[va_list], None) + } + "va_copy" => { + let va_list = match (tcx.lang_items().va_list(), &result.layout.ty.sty) { + (Some(did), ty::Adt(def, _)) if def.did == did => args[0].immediate(), + (Some(_), _) => self.load(args[0].immediate(), + tcx.data_layout.pointer_align.abi), + (None, _) => bug!("va_list language item must be defined") + }; + let intrinsic = self.cx().get_intrinsic(&("llvm.va_copy")); + self.call(intrinsic, &[llresult, va_list], None); + return; + } + "va_arg" => { + match fn_ty.ret.layout.abi { + layout::Abi::Scalar(ref scalar) => { + match scalar.value { + Primitive::Int(..) => { + if self.cx().size_of(ret_ty).bytes() < 4 { + // va_arg should not be called on a integer type + // less than 4 bytes in length. If it is, promote + // the integer to a `i32` and truncate the result + // back to the smaller type. + let promoted_result = emit_va_arg(self, args[0], + tcx.types.i32); + self.trunc(promoted_result, llret_ty) + } else { + emit_va_arg(self, args[0], ret_ty) + } + } + Primitive::Float(FloatTy::F64) | + Primitive::Pointer => { + emit_va_arg(self, args[0], ret_ty) + } + // `va_arg` should never be used with the return type f32. 
+ Primitive::Float(FloatTy::F32) => { + bug!("the va_arg intrinsic does not work with `f32`") } } - "bitreverse" => { - bx.call(cx.get_intrinsic(&format!("llvm.bitreverse.i{}", width)), - &[args[0].immediate()], None) - } - "add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" => { - let intrinsic = format!("llvm.{}{}.with.overflow.i{}", - if signed { 's' } else { 'u' }, - &name[..3], width); - let llfn = bx.cx.get_intrinsic(&intrinsic); + } + _ => { + bug!("the va_arg intrinsic does not work with non-scalar types") + } + } + } + "size_of_val" => { + let tp_ty = substs.type_at(0); + if let OperandValue::Pair(_, meta) = args[0].val { + let (llsize, _) = + glue::size_and_align_of_dst(self, tp_ty, Some(meta)); + llsize + } else { + self.const_usize(self.size_of(tp_ty).bytes()) + } + } + "min_align_of" => { + let tp_ty = substs.type_at(0); + self.const_usize(self.align_of(tp_ty).bytes()) + } + "min_align_of_val" => { + let tp_ty = substs.type_at(0); + if let OperandValue::Pair(_, meta) = args[0].val { + let (_, llalign) = + glue::size_and_align_of_dst(self, tp_ty, Some(meta)); + llalign + } else { + self.const_usize(self.align_of(tp_ty).bytes()) + } + } + "pref_align_of" => { + let tp_ty = substs.type_at(0); + self.const_usize(self.layout_of(tp_ty).align.pref.bytes()) + } + "type_name" => { + let tp_ty = substs.type_at(0); + let ty_name = Symbol::intern(&tp_ty.to_string()).as_str(); + self.const_str_slice(ty_name) + } + "type_id" => { + self.const_u64(self.tcx.type_id_hash(substs.type_at(0))) + } + "init" => { + let ty = substs.type_at(0); + if !self.layout_of(ty).is_zst() { + // Just zero out the stack slot. + // If we store a zero constant, LLVM will drown in vreg allocation for large + // data structures, and the generated code will be awful. (A telltale sign of + // this is large quantities of `mov [byte ptr foo],0` in the generated code.) 
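The trade-off the comment above describes, a single memset rather than a store of one huge zero constant, also shows up in ordinary Rust; a small sketch using only `std` (not the compiler's code) of zeroing a buffer the way the lowered `init` does:

```rust
use std::ptr;

fn zero_in_place(buf: &mut [u64]) {
    // One call, which LLVM can lower to a single memset of `buf.len() * 8`
    // bytes instead of a long run of individual zero stores.
    unsafe {
        ptr::write_bytes(buf.as_mut_ptr(), 0, buf.len());
    }
}

fn main() {
    let mut data = [u64::MAX; 512];
    zero_in_place(&mut data);
    assert!(data.iter().all(|&w| w == 0));
}
```

`ptr::write_bytes` is documented to behave like C's `memset`, which is exactly the shape of code the comment says LLVM handles well.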
+ memset_intrinsic( + self, + false, + ty, + llresult, + self.const_u8(0), + self.const_usize(1) + ); + } + return; + } + // Effectively no-ops + "uninit" | "forget" => { + return; + } + "needs_drop" => { + let tp_ty = substs.type_at(0); - // Convert `i1` to a `bool`, and write it to the out parameter - let pair = bx.call(llfn, &[ - args[0].immediate(), - args[1].immediate() - ], None); - let val = bx.extract_value(pair, 0); - let overflow = bx.zext(bx.extract_value(pair, 1), Type::bool(cx)); + self.const_bool(self.type_needs_drop(tp_ty)) + } + "offset" => { + let ptr = args[0].immediate(); + let offset = args[1].immediate(); + self.inbounds_gep(ptr, &[offset]) + } + "arith_offset" => { + let ptr = args[0].immediate(); + let offset = args[1].immediate(); + self.gep(ptr, &[offset]) + } - let dest = result.project_field(bx, 0); - bx.store(val, dest.llval, dest.align); - let dest = result.project_field(bx, 1); - bx.store(overflow, dest.llval, dest.align); + "copy_nonoverlapping" => { + copy_intrinsic(self, false, false, substs.type_at(0), + args[1].immediate(), args[0].immediate(), args[2].immediate()); + return; + } + "copy" => { + copy_intrinsic(self, true, false, substs.type_at(0), + args[1].immediate(), args[0].immediate(), args[2].immediate()); + return; + } + "write_bytes" => { + memset_intrinsic(self, false, substs.type_at(0), + args[0].immediate(), args[1].immediate(), args[2].immediate()); + return; + } - return; + "volatile_copy_nonoverlapping_memory" => { + copy_intrinsic(self, false, true, substs.type_at(0), + args[0].immediate(), args[1].immediate(), args[2].immediate()); + return; + } + "volatile_copy_memory" => { + copy_intrinsic(self, true, true, substs.type_at(0), + args[0].immediate(), args[1].immediate(), args[2].immediate()); + return; + } + "volatile_set_memory" => { + memset_intrinsic(self, true, substs.type_at(0), + args[0].immediate(), args[1].immediate(), args[2].immediate()); + return; + } + "volatile_load" | "unaligned_volatile_load" => { + let tp_ty = substs.type_at(0); + let mut ptr = args[0].immediate(); + if let PassMode::Cast(ty) = fn_ty.ret.mode { + ptr = self.pointercast(ptr, self.type_ptr_to(ty.llvm_type(self))); + } + let load = self.volatile_load(ptr); + let align = if name == "unaligned_volatile_load" { + 1 + } else { + self.align_of(tp_ty).bytes() as u32 + }; + unsafe { + llvm::LLVMSetAlignment(load, align); + } + to_immediate(self, load, self.layout_of(tp_ty)) + }, + "volatile_store" => { + let dst = args[0].deref(self.cx()); + args[1].val.volatile_store(self, dst); + return; + }, + "unaligned_volatile_store" => { + let dst = args[0].deref(self.cx()); + args[1].val.unaligned_volatile_store(self, dst); + return; + }, + "prefetch_read_data" | "prefetch_write_data" | + "prefetch_read_instruction" | "prefetch_write_instruction" => { + let expect = self.get_intrinsic(&("llvm.prefetch")); + let (rw, cache_type) = match name { + "prefetch_read_data" => (0, 1), + "prefetch_write_data" => (1, 1), + "prefetch_read_instruction" => (0, 0), + "prefetch_write_instruction" => (1, 0), + _ => bug!() + }; + self.call(expect, &[ + args[0].immediate(), + self.const_i32(rw), + args[1].immediate(), + self.const_i32(cache_type) + ], None) + }, + "ctlz" | "ctlz_nonzero" | "cttz" | "cttz_nonzero" | "ctpop" | "bswap" | + "bitreverse" | "add_with_overflow" | "sub_with_overflow" | + "mul_with_overflow" | "overflowing_add" | "overflowing_sub" | "overflowing_mul" | + "unchecked_div" | "unchecked_rem" | "unchecked_shl" | "unchecked_shr" | "exact_div" | + "rotate_left" | 
"rotate_right" => { + let ty = arg_tys[0]; + match int_type_width_signed(ty, self) { + Some((width, signed)) => + match name { + "ctlz" | "cttz" => { + let y = self.const_bool(false); + let llfn = self.get_intrinsic( + &format!("llvm.{}.i{}", name, width), + ); + self.call(llfn, &[args[0].immediate(), y], None) + } + "ctlz_nonzero" | "cttz_nonzero" => { + let y = self.const_bool(true); + let llvm_name = &format!("llvm.{}.i{}", &name[..4], width); + let llfn = self.get_intrinsic(llvm_name); + self.call(llfn, &[args[0].immediate(), y], None) + } + "ctpop" => self.call( + self.get_intrinsic(&format!("llvm.ctpop.i{}", width)), + &[args[0].immediate()], + None + ), + "bswap" => { + if width == 8 { + args[0].immediate() // byte swap a u8/i8 is just a no-op + } else { + self.call( + self.get_intrinsic( + &format!("llvm.bswap.i{}", width), + ), + &[args[0].immediate()], + None, + ) + } + } + "bitreverse" => { + self.call( + self.get_intrinsic( + &format!("llvm.bitreverse.i{}", width), + ), + &[args[0].immediate()], + None, + ) + } + "add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" => { + let intrinsic = format!("llvm.{}{}.with.overflow.i{}", + if signed { 's' } else { 'u' }, + &name[..3], width); + let llfn = self.get_intrinsic(&intrinsic); + + // Convert `i1` to a `bool`, and write it to the out parameter + let pair = self.call(llfn, &[ + args[0].immediate(), + args[1].immediate() + ], None); + let val = self.extract_value(pair, 0); + let overflow = self.extract_value(pair, 1); + let overflow = self.zext(overflow, self.type_bool()); + + let dest = result.project_field(self, 0); + self.store(val, dest.llval, dest.align); + let dest = result.project_field(self, 1); + self.store(overflow, dest.llval, dest.align); + + return; + }, + "overflowing_add" => self.add(args[0].immediate(), args[1].immediate()), + "overflowing_sub" => self.sub(args[0].immediate(), args[1].immediate()), + "overflowing_mul" => self.mul(args[0].immediate(), args[1].immediate()), + "exact_div" => + if signed { + self.exactsdiv(args[0].immediate(), args[1].immediate()) + } else { + self.exactudiv(args[0].immediate(), args[1].immediate()) + }, + "unchecked_div" => + if signed { + self.sdiv(args[0].immediate(), args[1].immediate()) + } else { + self.udiv(args[0].immediate(), args[1].immediate()) + }, + "unchecked_rem" => + if signed { + self.srem(args[0].immediate(), args[1].immediate()) + } else { + self.urem(args[0].immediate(), args[1].immediate()) + }, + "unchecked_shl" => self.shl(args[0].immediate(), args[1].immediate()), + "unchecked_shr" => + if signed { + self.ashr(args[0].immediate(), args[1].immediate()) + } else { + self.lshr(args[0].immediate(), args[1].immediate()) + }, + "rotate_left" | "rotate_right" => { + let is_left = name == "rotate_left"; + let val = args[0].immediate(); + let raw_shift = args[1].immediate(); + if llvm_util::get_major_version() >= 7 { + // rotate = funnel shift with first two args the same + let llvm_name = &format!("llvm.fsh{}.i{}", + if is_left { 'l' } else { 'r' }, width); + let llfn = self.get_intrinsic(llvm_name); + self.call(llfn, &[val, val, raw_shift], None) + } else { + // rotate_left: (X << (S % BW)) | (X >> ((BW - S) % BW)) + // rotate_right: (X << ((BW - S) % BW)) | (X >> (S % BW)) + let width = self.const_uint( + self.type_ix(width), + width, + ); + let shift = self.urem(raw_shift, width); + let width_minus_raw_shift = self.sub(width, raw_shift); + let inv_shift = self.urem(width_minus_raw_shift, width); + let shift1 = self.shl( + val, + if is_left { shift } else { 
inv_shift }, + ); + let shift2 = self.lshr( + val, + if !is_left { shift } else { inv_shift }, + ); + self.or(shift1, shift2) + } + }, + _ => bug!(), }, - "overflowing_add" => bx.add(args[0].immediate(), args[1].immediate()), - "overflowing_sub" => bx.sub(args[0].immediate(), args[1].immediate()), - "overflowing_mul" => bx.mul(args[0].immediate(), args[1].immediate()), - "exact_div" => - if signed { - bx.exactsdiv(args[0].immediate(), args[1].immediate()) - } else { - bx.exactudiv(args[0].immediate(), args[1].immediate()) - }, - "unchecked_div" => - if signed { - bx.sdiv(args[0].immediate(), args[1].immediate()) - } else { - bx.udiv(args[0].immediate(), args[1].immediate()) - }, - "unchecked_rem" => - if signed { - bx.srem(args[0].immediate(), args[1].immediate()) - } else { - bx.urem(args[0].immediate(), args[1].immediate()) - }, - "unchecked_shl" => bx.shl(args[0].immediate(), args[1].immediate()), - "unchecked_shr" => - if signed { - bx.ashr(args[0].immediate(), args[1].immediate()) - } else { - bx.lshr(args[0].immediate(), args[1].immediate()) - }, - _ => bug!(), + None => { + span_invalid_monomorphization_error( + tcx.sess, span, + &format!("invalid monomorphization of `{}` intrinsic: \ + expected basic integer type, found `{}`", name, ty)); + return; + } + } + + }, + "fadd_fast" | "fsub_fast" | "fmul_fast" | "fdiv_fast" | "frem_fast" => { + let sty = &arg_tys[0].sty; + match float_type_width(sty) { + Some(_width) => + match name { + "fadd_fast" => self.fadd_fast(args[0].immediate(), args[1].immediate()), + "fsub_fast" => self.fsub_fast(args[0].immediate(), args[1].immediate()), + "fmul_fast" => self.fmul_fast(args[0].immediate(), args[1].immediate()), + "fdiv_fast" => self.fdiv_fast(args[0].immediate(), args[1].immediate()), + "frem_fast" => self.frem_fast(args[0].immediate(), args[1].immediate()), + _ => bug!(), + }, + None => { + span_invalid_monomorphization_error( + tcx.sess, span, + &format!("invalid monomorphization of `{}` intrinsic: \ + expected basic float type, found `{}`", name, sty)); + return; + } + } + + }, + + "discriminant_value" => { + args[0].deref(self.cx()).codegen_get_discr(self, ret_ty) + } + + name if name.starts_with("simd_") => { + match generic_simd_intrinsic(self, name, + callee_ty, + args, + ret_ty, llret_ty, + span) { + Ok(llval) => llval, + Err(()) => return + } + } + // This requires that atomic intrinsics follow a specific naming pattern: + // "atomic_[_]", and no ordering means SeqCst + name if name.starts_with("atomic_") => { + use rustc_codegen_ssa::common::AtomicOrdering::*; + use rustc_codegen_ssa::common:: + {SynchronizationScope, AtomicRmwBinOp}; + + let split: Vec<&str> = name.split('_').collect(); + + let is_cxchg = split[1] == "cxchg" || split[1] == "cxchgweak"; + let (order, failorder) = match split.len() { + 2 => (SequentiallyConsistent, SequentiallyConsistent), + 3 => match split[2] { + "unordered" => (Unordered, Unordered), + "relaxed" => (Monotonic, Monotonic), + "acq" => (Acquire, Acquire), + "rel" => (Release, Monotonic), + "acqrel" => (AcquireRelease, Acquire), + "failrelaxed" if is_cxchg => + (SequentiallyConsistent, Monotonic), + "failacq" if is_cxchg => + (SequentiallyConsistent, Acquire), + _ => self.sess().fatal("unknown ordering in atomic intrinsic") }, - None => { - span_invalid_monomorphization_error( - tcx.sess, span, + 4 => match (split[2], split[3]) { + ("acq", "failrelaxed") if is_cxchg => + (Acquire, Monotonic), + ("acqrel", "failrelaxed") if is_cxchg => + (AcquireRelease, Monotonic), + _ => self.sess().fatal("unknown 
ordering in atomic intrinsic") + }, + _ => self.sess().fatal("Atomic intrinsic not in correct format"), + }; + + let invalid_monomorphization = |ty| { + span_invalid_monomorphization_error(tcx.sess, span, &format!("invalid monomorphization of `{}` intrinsic: \ expected basic integer type, found `{}`", name, ty)); - return; - } - } - }, - "fadd_fast" | "fsub_fast" | "fmul_fast" | "fdiv_fast" | "frem_fast" => { - let sty = &arg_tys[0].sty; - match float_type_width(sty) { - Some(_width) => - match name { - "fadd_fast" => bx.fadd_fast(args[0].immediate(), args[1].immediate()), - "fsub_fast" => bx.fsub_fast(args[0].immediate(), args[1].immediate()), - "fmul_fast" => bx.fmul_fast(args[0].immediate(), args[1].immediate()), - "fdiv_fast" => bx.fdiv_fast(args[0].immediate(), args[1].immediate()), - "frem_fast" => bx.frem_fast(args[0].immediate(), args[1].immediate()), - _ => bug!(), - }, - None => { - span_invalid_monomorphization_error( - tcx.sess, span, - &format!("invalid monomorphization of `{}` intrinsic: \ - expected basic float type, found `{}`", name, sty)); - return; - } - } + }; - }, + match split[1] { + "cxchg" | "cxchgweak" => { + let ty = substs.type_at(0); + if int_type_width_signed(ty, self).is_some() { + let weak = split[1] == "cxchgweak"; + let pair = self.atomic_cmpxchg( + args[0].immediate(), + args[1].immediate(), + args[2].immediate(), + order, + failorder, + weak); + let val = self.extract_value(pair, 0); + let success = self.extract_value(pair, 1); + let success = self.zext(success, self.type_bool()); - "discriminant_value" => { - args[0].deref(bx.cx).codegen_get_discr(bx, ret_ty) - } - - name if name.starts_with("simd_") => { - match generic_simd_intrinsic(bx, name, - callee_ty, - args, - ret_ty, llret_ty, - span) { - Ok(llval) => llval, - Err(()) => return - } - } - // This requires that atomic intrinsics follow a specific naming pattern: - // "atomic_[_]", and no ordering means SeqCst - name if name.starts_with("atomic_") => { - use llvm::AtomicOrdering::*; - - let split: Vec<&str> = name.split('_').collect(); - - let is_cxchg = split[1] == "cxchg" || split[1] == "cxchgweak"; - let (order, failorder) = match split.len() { - 2 => (SequentiallyConsistent, SequentiallyConsistent), - 3 => match split[2] { - "unordered" => (Unordered, Unordered), - "relaxed" => (Monotonic, Monotonic), - "acq" => (Acquire, Acquire), - "rel" => (Release, Monotonic), - "acqrel" => (AcquireRelease, Acquire), - "failrelaxed" if is_cxchg => - (SequentiallyConsistent, Monotonic), - "failacq" if is_cxchg => - (SequentiallyConsistent, Acquire), - _ => cx.sess().fatal("unknown ordering in atomic intrinsic") - }, - 4 => match (split[2], split[3]) { - ("acq", "failrelaxed") if is_cxchg => - (Acquire, Monotonic), - ("acqrel", "failrelaxed") if is_cxchg => - (AcquireRelease, Monotonic), - _ => cx.sess().fatal("unknown ordering in atomic intrinsic") - }, - _ => cx.sess().fatal("Atomic intrinsic not in correct format"), - }; - - let invalid_monomorphization = |ty| { - span_invalid_monomorphization_error(tcx.sess, span, - &format!("invalid monomorphization of `{}` intrinsic: \ - expected basic integer type, found `{}`", name, ty)); - }; - - match split[1] { - "cxchg" | "cxchgweak" => { - let ty = substs.type_at(0); - if int_type_width_signed(ty, cx).is_some() { - let weak = if split[1] == "cxchgweak" { llvm::True } else { llvm::False }; - let pair = bx.atomic_cmpxchg( - args[0].immediate(), - args[1].immediate(), - args[2].immediate(), - order, - failorder, - weak); - let val = bx.extract_value(pair, 0); - let 
success = bx.zext(bx.extract_value(pair, 1), Type::bool(bx.cx)); - - let dest = result.project_field(bx, 0); - bx.store(val, dest.llval, dest.align); - let dest = result.project_field(bx, 1); - bx.store(success, dest.llval, dest.align); - return; - } else { - return invalid_monomorphization(ty); - } - } - - "load" => { - let ty = substs.type_at(0); - if int_type_width_signed(ty, cx).is_some() { - let align = cx.align_of(ty); - bx.atomic_load(args[0].immediate(), order, align) - } else { - return invalid_monomorphization(ty); - } - } - - "store" => { - let ty = substs.type_at(0); - if int_type_width_signed(ty, cx).is_some() { - let align = cx.align_of(ty); - bx.atomic_store(args[1].immediate(), args[0].immediate(), order, align); - return; - } else { - return invalid_monomorphization(ty); - } - } - - "fence" => { - bx.atomic_fence(order, llvm::SynchronizationScope::CrossThread); - return; - } - - "singlethreadfence" => { - bx.atomic_fence(order, llvm::SynchronizationScope::SingleThread); - return; - } - - // These are all AtomicRMW ops - op => { - let atom_op = match op { - "xchg" => llvm::AtomicXchg, - "xadd" => llvm::AtomicAdd, - "xsub" => llvm::AtomicSub, - "and" => llvm::AtomicAnd, - "nand" => llvm::AtomicNand, - "or" => llvm::AtomicOr, - "xor" => llvm::AtomicXor, - "max" => llvm::AtomicMax, - "min" => llvm::AtomicMin, - "umax" => llvm::AtomicUMax, - "umin" => llvm::AtomicUMin, - _ => cx.sess().fatal("unknown atomic operation") - }; - - let ty = substs.type_at(0); - if int_type_width_signed(ty, cx).is_some() { - bx.atomic_rmw(atom_op, args[0].immediate(), args[1].immediate(), order) - } else { - return invalid_monomorphization(ty); - } - } - } - } - - "nontemporal_store" => { - let dst = args[0].deref(bx.cx); - args[1].val.nontemporal_store(bx, dst); - return; - } - - _ => { - let intr = Intrinsic::find(&name).unwrap_or_else(|| - bug!("unknown intrinsic '{}'", name)); - - fn one(x: Vec) -> T { - assert_eq!(x.len(), 1); - x.into_iter().next().unwrap() - } - fn ty_to_type(cx: &CodegenCx<'ll, '_>, t: &intrinsics::Type) -> Vec<&'ll Type> { - use intrinsics::Type::*; - match *t { - Void => vec![Type::void(cx)], - Integer(_signed, _width, llvm_width) => { - vec![Type::ix(cx, llvm_width as u64)] - } - Float(x) => { - match x { - 32 => vec![Type::f32(cx)], - 64 => vec![Type::f64(cx)], - _ => bug!() + let dest = result.project_field(self, 0); + self.store(val, dest.llval, dest.align); + let dest = result.project_field(self, 1); + self.store(success, dest.llval, dest.align); + return; + } else { + return invalid_monomorphization(ty); } } - Pointer(ref t, ref llvm_elem, _const) => { - let t = llvm_elem.as_ref().unwrap_or(t); - let elem = one(ty_to_type(cx, t)); - vec![elem.ptr_to()] - } - Vector(ref t, ref llvm_elem, length) => { - let t = llvm_elem.as_ref().unwrap_or(t); - let elem = one(ty_to_type(cx, t)); - vec![Type::vector(elem, length as u64)] - } - Aggregate(false, ref contents) => { - let elems = contents.iter() - .map(|t| one(ty_to_type(cx, t))) - .collect::>(); - vec![Type::struct_(cx, &elems, false)] - } - Aggregate(true, ref contents) => { - contents.iter() - .flat_map(|t| ty_to_type(cx, t)) - .collect() - } - } - } - // This allows an argument list like `foo, (bar, baz), - // qux` to be converted into `foo, bar, baz, qux`, integer - // arguments to be truncated as needed and pointers to be - // cast. 
- fn modify_as_needed( - bx: &Builder<'a, 'll, 'tcx>, - t: &intrinsics::Type, - arg: &OperandRef<'ll, 'tcx>, - ) -> Vec<&'ll Value> { - match *t { - intrinsics::Type::Aggregate(true, ref contents) => { - // We found a tuple that needs squishing! So - // run over the tuple and load each field. - // - // This assumes the type is "simple", i.e. no - // destructors, and the contents are SIMD - // etc. - assert!(!bx.cx.type_needs_drop(arg.layout.ty)); - let (ptr, align) = match arg.val { - OperandValue::Ref(ptr, None, align) => (ptr, align), - _ => bug!() + "load" => { + let ty = substs.type_at(0); + if int_type_width_signed(ty, self).is_some() { + let size = self.size_of(ty); + self.atomic_load(args[0].immediate(), order, size) + } else { + return invalid_monomorphization(ty); + } + } + + "store" => { + let ty = substs.type_at(0); + if int_type_width_signed(ty, self).is_some() { + let size = self.size_of(ty); + self.atomic_store( + args[1].immediate(), + args[0].immediate(), + order, + size + ); + return; + } else { + return invalid_monomorphization(ty); + } + } + + "fence" => { + self.atomic_fence(order, SynchronizationScope::CrossThread); + return; + } + + "singlethreadfence" => { + self.atomic_fence(order, SynchronizationScope::SingleThread); + return; + } + + // These are all AtomicRMW ops + op => { + let atom_op = match op { + "xchg" => AtomicRmwBinOp::AtomicXchg, + "xadd" => AtomicRmwBinOp::AtomicAdd, + "xsub" => AtomicRmwBinOp::AtomicSub, + "and" => AtomicRmwBinOp::AtomicAnd, + "nand" => AtomicRmwBinOp::AtomicNand, + "or" => AtomicRmwBinOp::AtomicOr, + "xor" => AtomicRmwBinOp::AtomicXor, + "max" => AtomicRmwBinOp::AtomicMax, + "min" => AtomicRmwBinOp::AtomicMin, + "umax" => AtomicRmwBinOp::AtomicUMax, + "umin" => AtomicRmwBinOp::AtomicUMin, + _ => self.sess().fatal("unknown atomic operation") }; - let arg = PlaceRef::new_sized(ptr, arg.layout, align); - (0..contents.len()).map(|i| { - arg.project_field(bx, i).load(bx).immediate() - }).collect() + + let ty = substs.type_at(0); + if int_type_width_signed(ty, self).is_some() { + self.atomic_rmw( + atom_op, + args[0].immediate(), + args[1].immediate(), + order + ) + } else { + return invalid_monomorphization(ty); + } } - intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => { - let llvm_elem = one(ty_to_type(bx.cx, llvm_elem)); - vec![bx.pointercast(arg.immediate(), llvm_elem.ptr_to())] - } - intrinsics::Type::Vector(_, Some(ref llvm_elem), length) => { - let llvm_elem = one(ty_to_type(bx.cx, llvm_elem)); - vec![bx.bitcast(arg.immediate(), Type::vector(llvm_elem, length as u64))] - } - intrinsics::Type::Integer(_, width, llvm_width) if width != llvm_width => { - // the LLVM intrinsic uses a smaller integer - // size than the C intrinsic's signature, so - // we have to trim it down here. 
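The atomic arms above all hang off the `atomic_<op>[_<ordering>]` naming scheme noted earlier, where a missing suffix means sequentially consistent; a simplified standalone parser (the enum and error handling are illustrative, not the compiler's types) showing that decomposition:

```rust
// Illustrative ordering names only; the real code maps onto the
// rustc_codegen_ssa::common::AtomicOrdering variants used above.
#[derive(Debug, PartialEq)]
enum Ordering {
    Relaxed,
    Acquire,
    Release,
    AcqRel,
    SeqCst,
}

fn parse_atomic(name: &str) -> Option<(&str, Ordering)> {
    let rest = name.strip_prefix("atomic_")?;
    Some(match rest.split_once('_') {
        // No ordering suffix means sequentially consistent.
        None => (rest, Ordering::SeqCst),
        Some((op, "relaxed")) => (op, Ordering::Relaxed),
        Some((op, "acq")) => (op, Ordering::Acquire),
        Some((op, "rel")) => (op, Ordering::Release),
        Some((op, "acqrel")) => (op, Ordering::AcqRel),
        // The cxchg failure orderings ("failacq", "failrelaxed") are omitted
        // here for brevity.
        Some(_) => return None,
    })
}

fn main() {
    assert_eq!(parse_atomic("atomic_xadd"), Some(("xadd", Ordering::SeqCst)));
    assert_eq!(parse_atomic("atomic_load_acq"), Some(("load", Ordering::Acquire)));
    assert_eq!(parse_atomic("fence"), None);
}
```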
- vec![bx.trunc(arg.immediate(), Type::ix(bx.cx, llvm_width as u64))] - } - _ => vec![arg.immediate()], } } + "nontemporal_store" => { + let dst = args[0].deref(self.cx()); + args[1].val.nontemporal_store(self, dst); + return; + } - let inputs = intr.inputs.iter() + _ => { + let intr = match Intrinsic::find(&name) { + Some(intr) => intr, + None => bug!("unknown intrinsic '{}'", name), + }; + fn one(x: Vec) -> T { + assert_eq!(x.len(), 1); + x.into_iter().next().unwrap() + } + fn ty_to_type<'ll>( + cx: &CodegenCx<'ll, '_>, + t: &intrinsics::Type + ) -> Vec<&'ll Type> { + use intrinsics::Type::*; + match *t { + Void => vec![cx.type_void()], + Integer(_signed, _width, llvm_width) => { + vec![cx.type_ix( llvm_width as u64)] + } + Float(x) => { + match x { + 32 => vec![cx.type_f32()], + 64 => vec![cx.type_f64()], + _ => bug!() + } + } + Pointer(ref t, ref llvm_elem, _const) => { + let t = llvm_elem.as_ref().unwrap_or(t); + let elem = one(ty_to_type(cx, t)); + vec![cx.type_ptr_to(elem)] + } + Vector(ref t, ref llvm_elem, length) => { + let t = llvm_elem.as_ref().unwrap_or(t); + let elem = one(ty_to_type(cx, t)); + vec![cx.type_vector(elem, length as u64)] + } + Aggregate(false, ref contents) => { + let elems = contents.iter() + .map(|t| one(ty_to_type(cx, t))) + .collect::>(); + vec![cx.type_struct( &elems, false)] + } + Aggregate(true, ref contents) => { + contents.iter() .flat_map(|t| ty_to_type(cx, t)) - .collect::>(); - - let outputs = one(ty_to_type(cx, &intr.output)); - - let llargs: Vec<_> = intr.inputs.iter().zip(args).flat_map(|(t, arg)| { - modify_as_needed(bx, t, arg) - }).collect(); - assert_eq!(inputs.len(), llargs.len()); - - let val = match intr.definition { - intrinsics::IntrinsicDef::Named(name) => { - let f = declare::declare_cfn(cx, - name, - Type::func(&inputs, outputs)); - bx.call(f, &llargs, None) - } - }; - - match *intr.output { - intrinsics::Type::Aggregate(flatten, ref elems) => { - // the output is a tuple so we need to munge it properly - assert!(!flatten); - - for i in 0..elems.len() { - let dest = result.project_field(bx, i); - let val = bx.extract_value(val, i as u64); - bx.store(val, dest.llval, dest.align); + .collect() + } } - return; } - _ => val, + + // This allows an argument list like `foo, (bar, baz), + // qux` to be converted into `foo, bar, baz, qux`, integer + // arguments to be truncated as needed and pointers to be + // cast. + fn modify_as_needed<'ll, 'tcx>( + bx: &mut Builder<'_, 'll, 'tcx>, + t: &intrinsics::Type, + arg: &OperandRef<'tcx, &'ll Value>, + ) -> Vec<&'ll Value> { + match *t { + intrinsics::Type::Aggregate(true, ref contents) => { + // We found a tuple that needs squishing! So + // run over the tuple and load each field. + // + // This assumes the type is "simple", i.e. no + // destructors, and the contents are SIMD + // etc. 
+ assert!(!bx.type_needs_drop(arg.layout.ty)); + let (ptr, align) = match arg.val { + OperandValue::Ref(ptr, None, align) => (ptr, align), + _ => bug!() + }; + let arg = PlaceRef::new_sized(ptr, arg.layout, align); + (0..contents.len()).map(|i| { + let field = arg.project_field(bx, i); + bx.load_operand(field).immediate() + }).collect() + } + intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => { + let llvm_elem = one(ty_to_type(bx, llvm_elem)); + vec![bx.pointercast(arg.immediate(), bx.type_ptr_to(llvm_elem))] + } + intrinsics::Type::Vector(_, Some(ref llvm_elem), length) => { + let llvm_elem = one(ty_to_type(bx, llvm_elem)); + vec![ + bx.bitcast(arg.immediate(), + bx.type_vector(llvm_elem, length as u64)) + ] + } + intrinsics::Type::Integer(_, width, llvm_width) if width != llvm_width => { + // the LLVM intrinsic uses a smaller integer + // size than the C intrinsic's signature, so + // we have to trim it down here. + vec![bx.trunc(arg.immediate(), bx.type_ix(llvm_width as u64))] + } + _ => vec![arg.immediate()], + } + } + + + let inputs = intr.inputs.iter() + .flat_map(|t| ty_to_type(self, t)) + .collect::>(); + + let outputs = one(ty_to_type(self, &intr.output)); + + let llargs: Vec<_> = intr.inputs.iter().zip(args).flat_map(|(t, arg)| { + modify_as_needed(self, t, arg) + }).collect(); + assert_eq!(inputs.len(), llargs.len()); + + let val = match intr.definition { + intrinsics::IntrinsicDef::Named(name) => { + let f = self.declare_cfn( + name, + self.type_func(&inputs, outputs), + ); + self.call(f, &llargs, None) + } + }; + + match *intr.output { + intrinsics::Type::Aggregate(flatten, ref elems) => { + // the output is a tuple so we need to munge it properly + assert!(!flatten); + + for i in 0..elems.len() { + let dest = result.project_field(self, i); + let val = self.extract_value(val, i as u64); + self.store(val, dest.llval, dest.align); + } + return; + } + _ => val, + } + } + }; + + if !fn_ty.ret.is_ignore() { + if let PassMode::Cast(ty) = fn_ty.ret.mode { + let ptr_llty = self.type_ptr_to(ty.llvm_type(self)); + let ptr = self.pointercast(result.llval, ptr_llty); + self.store(llval, ptr, result.align); + } else { + OperandRef::from_immediate_or_packed_pair(self, llval, result.layout) + .val.store(self, result); } } - }; + } - if !fn_ty.ret.is_ignore() { - if let PassMode::Cast(ty) = fn_ty.ret.mode { - let ptr = bx.pointercast(result.llval, ty.llvm_type(cx).ptr_to()); - bx.store(llval, ptr, result.align); - } else { - OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout) - .val.store(bx, result); - } + fn abort(&mut self) { + let fnname = self.get_intrinsic(&("llvm.trap")); + self.call(fnname, &[], None); + } + + fn assume(&mut self, val: Self::Value) { + let assume_intrinsic = self.get_intrinsic("llvm.assume"); + self.call(assume_intrinsic, &[val], None); + } + + fn expect(&mut self, cond: Self::Value, expected: bool) -> Self::Value { + let expect = self.get_intrinsic(&"llvm.expect.i1"); + self.call(expect, &[cond, self.const_bool(expected)], None) } } fn copy_intrinsic( - bx: &Builder<'a, 'll, 'tcx>, + bx: &mut Builder<'a, 'll, 'tcx>, allow_overlap: bool, volatile: bool, ty: Ty<'tcx>, dst: &'ll Value, src: &'ll Value, count: &'ll Value, -) -> &'ll Value { - let cx = bx.cx; - let (size, align) = cx.size_and_align_of(ty); - let size = C_usize(cx, size.bytes()); - let align = C_i32(cx, align.abi() as i32); - - let operation = if allow_overlap { - "memmove" +) { + let (size, align) = bx.size_and_align_of(ty); + let size = bx.mul(bx.const_usize(size.bytes()), count); + 
let flags = if volatile { + MemFlags::VOLATILE } else { - "memcpy" + MemFlags::empty() }; - - let name = format!("llvm.{}.p0i8.p0i8.i{}", operation, - cx.data_layout().pointer_size.bits()); - - let dst_ptr = bx.pointercast(dst, Type::i8p(cx)); - let src_ptr = bx.pointercast(src, Type::i8p(cx)); - let llfn = cx.get_intrinsic(&name); - - bx.call(llfn, - &[dst_ptr, - src_ptr, - bx.mul(size, count), - align, - C_bool(cx, volatile)], - None) + if allow_overlap { + bx.memmove(dst, align, src, align, size, flags); + } else { + bx.memcpy(dst, align, src, align, size, flags); + } } fn memset_intrinsic( - bx: &Builder<'a, 'll, 'tcx>, + bx: &mut Builder<'a, 'll, 'tcx>, volatile: bool, ty: Ty<'tcx>, dst: &'ll Value, val: &'ll Value, count: &'ll Value -) -> &'ll Value { - let cx = bx.cx; - let (size, align) = cx.size_and_align_of(ty); - let size = C_usize(cx, size.bytes()); - let align = C_i32(cx, align.abi() as i32); - let dst = bx.pointercast(dst, Type::i8p(cx)); - call_memset(bx, dst, val, bx.mul(size, count), align, volatile) +) { + let (size, align) = bx.size_and_align_of(ty); + let size = bx.mul(bx.const_usize(size.bytes()), count); + let flags = if volatile { + MemFlags::VOLATILE + } else { + MemFlags::empty() + }; + bx.memset(dst, val, size, align, flags); } fn try_intrinsic( - bx: &Builder<'a, 'll, 'tcx>, - cx: &CodegenCx<'ll, 'tcx>, + bx: &mut Builder<'a, 'll, 'tcx>, func: &'ll Value, data: &'ll Value, local_ptr: &'ll Value, @@ -740,12 +884,12 @@ fn try_intrinsic( ) { if bx.sess().no_landing_pads() { bx.call(func, &[data], None); - let ptr_align = bx.tcx().data_layout.pointer_align; - bx.store(C_null(Type::i8p(&bx.cx)), dest, ptr_align); + let ptr_align = bx.tcx().data_layout.pointer_align.abi; + bx.store(bx.const_null(bx.type_i8p()), dest, ptr_align); } else if wants_msvc_seh(bx.sess()) { - codegen_msvc_try(bx, cx, func, data, local_ptr, dest); + codegen_msvc_try(bx, func, data, local_ptr, dest); } else { - codegen_gnu_try(bx, cx, func, data, local_ptr, dest); + codegen_gnu_try(bx, func, data, local_ptr, dest); } } @@ -757,22 +901,19 @@ fn try_intrinsic( // writing, however, LLVM does not recommend the usage of these new instructions // as the old ones are still more optimized. fn codegen_msvc_try( - bx: &Builder<'a, 'll, 'tcx>, - cx: &CodegenCx<'ll, 'tcx>, + bx: &mut Builder<'a, 'll, 'tcx>, func: &'ll Value, data: &'ll Value, local_ptr: &'ll Value, dest: &'ll Value, ) { - let llfn = get_rust_try_fn(cx, &mut |bx| { - let cx = bx.cx; + let llfn = get_rust_try_fn(bx, &mut |mut bx| { + bx.set_personality_fn(bx.eh_personality()); - bx.set_personality_fn(bx.cx.eh_personality()); - - let normal = bx.build_sibling_block("normal"); - let catchswitch = bx.build_sibling_block("catchswitch"); - let catchpad = bx.build_sibling_block("catchpad"); - let caught = bx.build_sibling_block("caught"); + let mut normal = bx.build_sibling_block("normal"); + let mut catchswitch = bx.build_sibling_block("catchswitch"); + let mut catchpad = bx.build_sibling_block("catchpad"); + let mut caught = bx.build_sibling_block("caught"); let func = llvm::get_param(bx.llfn(), 0); let data = llvm::get_param(bx.llfn(), 1); @@ -817,40 +958,41 @@ fn codegen_msvc_try( // } // // More information can be found in libstd's seh.rs implementation. 
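From user code, the shims that `codegen_msvc_try` builds here (and `codegen_gnu_try` below) are reached only through the standard library; a short sketch of the stable surface:

```rust
use std::panic;

fn main() {
    // `catch_unwind` is the stable entry point that ultimately drives the
    // `try` intrinsic whose shims are generated here: the panic is caught
    // and its payload comes back as `Err` instead of unwinding further.
    let result = panic::catch_unwind(|| {
        panic!("boom");
    });
    assert!(result.is_err());
}
```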
- let i64p = Type::i64(cx).ptr_to(); - let ptr_align = bx.tcx().data_layout.pointer_align; + let i64p = bx.type_ptr_to(bx.type_i64()); + let ptr_align = bx.tcx().data_layout.pointer_align.abi; let slot = bx.alloca(i64p, "slot", ptr_align); bx.invoke(func, &[data], normal.llbb(), catchswitch.llbb(), None); - normal.ret(C_i32(cx, 0)); + normal.ret(bx.const_i32(0)); let cs = catchswitch.catch_switch(None, None, 1); catchswitch.add_handler(cs, catchpad.llbb()); - let tcx = cx.tcx; - let tydesc = match tcx.lang_items().msvc_try_filter() { - Some(did) => ::consts::get_static(cx, did), + let tydesc = match bx.tcx().lang_items().msvc_try_filter() { + Some(did) => bx.get_static(did), None => bug!("msvc_try_filter not defined"), }; - let tok = catchpad.catch_pad(cs, &[tydesc, C_i32(cx, 0), slot]); + let funclet = catchpad.catch_pad(cs, &[tydesc, bx.const_i32(0), slot]); let addr = catchpad.load(slot, ptr_align); - let i64_align = bx.tcx().data_layout.i64_align; + let i64_align = bx.tcx().data_layout.i64_align.abi; let arg1 = catchpad.load(addr, i64_align); - let val1 = C_i32(cx, 1); - let arg2 = catchpad.load(catchpad.inbounds_gep(addr, &[val1]), i64_align); + let val1 = bx.const_i32(1); + let gep1 = catchpad.inbounds_gep(addr, &[val1]); + let arg2 = catchpad.load(gep1, i64_align); let local_ptr = catchpad.bitcast(local_ptr, i64p); + let gep2 = catchpad.inbounds_gep(local_ptr, &[val1]); catchpad.store(arg1, local_ptr, i64_align); - catchpad.store(arg2, catchpad.inbounds_gep(local_ptr, &[val1]), i64_align); - catchpad.catch_ret(tok, caught.llbb()); + catchpad.store(arg2, gep2, i64_align); + catchpad.catch_ret(&funclet, caught.llbb()); - caught.ret(C_i32(cx, 1)); + caught.ret(bx.const_i32(1)); }); // Note that no invoke is used here because by definition this function // can't panic (that's what it's catching). let ret = bx.call(llfn, &[func, data, local_ptr], None); - let i32_align = bx.tcx().data_layout.i32_align; + let i32_align = bx.tcx().data_layout.i32_align.abi; bx.store(ret, dest, i32_align); } @@ -866,16 +1008,13 @@ fn codegen_msvc_try( // functions in play. By calling a shim we're guaranteed that our shim will have // the right personality function. fn codegen_gnu_try( - bx: &Builder<'a, 'll, 'tcx>, - cx: &CodegenCx<'ll, 'tcx>, + bx: &mut Builder<'a, 'll, 'tcx>, func: &'ll Value, data: &'ll Value, local_ptr: &'ll Value, dest: &'ll Value, ) { - let llfn = get_rust_try_fn(cx, &mut |bx| { - let cx = bx.cx; - + let llfn = get_rust_try_fn(bx, &mut |mut bx| { // Codegens the shims described above: // // bx: @@ -893,14 +1032,14 @@ fn codegen_gnu_try( // expected to be `*mut *mut u8` for this to actually work, but that's // managed by the standard library. - let then = bx.build_sibling_block("then"); - let catch = bx.build_sibling_block("catch"); + let mut then = bx.build_sibling_block("then"); + let mut catch = bx.build_sibling_block("catch"); let func = llvm::get_param(bx.llfn(), 0); let data = llvm::get_param(bx.llfn(), 1); let local_ptr = llvm::get_param(bx.llfn(), 2); bx.invoke(func, &[data], then.llbb(), catch.llbb(), None); - then.ret(C_i32(cx, 0)); + then.ret(bx.const_i32(0)); // Type indicator for the exception being thrown. // @@ -908,19 +1047,20 @@ fn codegen_gnu_try( // being thrown. The second value is a "selector" indicating which of // the landing pad clauses the exception's type had been matched to. // rust_try ignores the selector. 
- let lpad_ty = Type::struct_(cx, &[Type::i8p(cx), Type::i32(cx)], false); - let vals = catch.landing_pad(lpad_ty, bx.cx.eh_personality(), 1); - catch.add_clause(vals, C_null(Type::i8p(cx))); + let lpad_ty = bx.type_struct(&[bx.type_i8p(), bx.type_i32()], false); + let vals = catch.landing_pad(lpad_ty, bx.eh_personality(), 1); + catch.add_clause(vals, bx.const_null(bx.type_i8p())); let ptr = catch.extract_value(vals, 0); - let ptr_align = bx.tcx().data_layout.pointer_align; - catch.store(ptr, catch.bitcast(local_ptr, Type::i8p(cx).ptr_to()), ptr_align); - catch.ret(C_i32(cx, 1)); + let ptr_align = bx.tcx().data_layout.pointer_align.abi; + let bitcast = catch.bitcast(local_ptr, bx.type_ptr_to(bx.type_i8p())); + catch.store(ptr, bitcast, ptr_align); + catch.ret(bx.const_i32(1)); }); // Note that no invoke is used here because by definition this function // can't panic (that's what it's catching). let ret = bx.call(llfn, &[func, data, local_ptr], None); - let i32_align = bx.tcx().data_layout.i32_align; + let i32_align = bx.tcx().data_layout.i32_align.abi; bx.store(ret, dest, i32_align); } @@ -933,14 +1073,14 @@ fn gen_fn<'ll, 'tcx>( output: Ty<'tcx>, codegen: &mut dyn FnMut(Builder<'_, 'll, 'tcx>), ) -> &'ll Value { - let rust_fn_ty = cx.tcx.mk_fn_ptr(ty::Binder::bind(cx.tcx.mk_fn_sig( + let rust_fn_sig = ty::Binder::bind(cx.tcx.mk_fn_sig( inputs.into_iter(), output, false, hir::Unsafety::Unsafe, Abi::Rust - ))); - let llfn = declare::define_internal_fn(cx, name, rust_fn_ty); + )); + let llfn = cx.define_internal_fn(name, rust_fn_sig); attributes::from_fn_attrs(cx, llfn, None); let bx = Builder::new_block(cx, llfn, "entry-block"); codegen(bx); @@ -980,10 +1120,10 @@ fn span_invalid_monomorphization_error(a: &Session, b: Span, c: &str) { } fn generic_simd_intrinsic( - bx: &Builder<'a, 'll, 'tcx>, + bx: &mut Builder<'a, 'll, 'tcx>, name: &str, callee_ty: Ty<'tcx>, - args: &[OperandRef<'ll, 'tcx>], + args: &[OperandRef<'tcx, &'ll Value>], ret_ty: Ty<'tcx>, llret_ty: &'ll Type, span: Span @@ -1056,7 +1196,7 @@ fn generic_simd_intrinsic( found `{}` with length {}", in_len, in_ty, ret_ty, out_len); - require!(llret_ty.element_type().kind() == TypeKind::Integer, + require!(bx.type_kind(bx.element_type(llret_ty)) == TypeKind::Integer, "expected return type with integer elements, found `{}` with non-integer `{}`", ret_ty, ret_ty.simd_type(tcx)); @@ -1092,8 +1232,8 @@ fn generic_simd_intrinsic( let indices: Option> = (0..n) .map(|i| { let arg_idx = i; - let val = const_get_elt(vector, i as u64); - match const_to_opt_u128(val, true) { + let val = bx.const_get_elt(vector, i as u64); + match bx.const_to_opt_u128(val, true) { None => { emit_error!("shuffle index #{} is not a constant", arg_idx); None @@ -1103,18 +1243,18 @@ fn generic_simd_intrinsic( arg_idx, total_len); None } - Some(idx) => Some(C_i32(bx.cx, idx as i32)), + Some(idx) => Some(bx.const_i32(idx as i32)), } }) .collect(); let indices = match indices { Some(i) => i, - None => return Ok(C_null(llret_ty)) + None => return Ok(bx.const_null(llret_ty)) }; return Ok(bx.shuffle_vector(args[0].immediate(), args[1].immediate(), - C_vector(&indices))) + bx.const_vector(&indices))) } if name == "simd_insert" { @@ -1145,8 +1285,8 @@ fn generic_simd_intrinsic( _ => return_error!("mask element type is `{}`, expected `i_`", m_elem_ty) } // truncate the mask to a vector of i1s - let i1 = Type::i1(bx.cx); - let i1xn = Type::vector(i1, m_len as u64); + let i1 = bx.type_i1(); + let i1xn = bx.type_vector(i1, m_len as u64); let m_i1s = 
bx.trunc(args[0].immediate(), i1xn); return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate())); } @@ -1156,9 +1296,9 @@ fn generic_simd_intrinsic( in_elem: &::rustc::ty::TyS, in_ty: &::rustc::ty::TyS, in_len: usize, - bx: &Builder<'a, 'll, 'tcx>, + bx: &mut Builder<'a, 'll, 'tcx>, span: Span, - args: &[OperandRef<'ll, 'tcx>], + args: &[OperandRef<'tcx, &'ll Value>], ) -> Result<&'ll Value, ()> { macro_rules! emit_error { ($msg: tt) => { @@ -1207,7 +1347,7 @@ fn generic_simd_intrinsic( }; let llvm_name = &format!("llvm.{0}.v{1}{2}", name, in_len, ety); - let intrinsic = bx.cx.get_intrinsic(&llvm_name); + let intrinsic = bx.get_intrinsic(&llvm_name); let c = bx.call(intrinsic, &args.iter().map(|arg| arg.immediate()).collect::>(), None); @@ -1278,16 +1418,16 @@ fn generic_simd_intrinsic( mut no_pointers: usize) -> &'ll Type { // FIXME: use cx.layout_of(ty).llvm_type() ? let mut elem_ty = match elem_ty.sty { - ty::Int(v) => Type::int_from_ty(cx, v), - ty::Uint(v) => Type::uint_from_ty(cx, v), - ty::Float(v) => Type::float_from_ty(cx, v), + ty::Int(v) => cx.type_int_from_ty( v), + ty::Uint(v) => cx.type_uint_from_ty( v), + ty::Float(v) => cx.type_float_from_ty( v), _ => unreachable!(), }; while no_pointers > 0 { - elem_ty = elem_ty.ptr_to(); + elem_ty = cx.type_ptr_to(elem_ty); no_pointers -= 1; } - Type::vector(elem_ty, vec_len as u64) + cx.type_vector(elem_ty, vec_len as u64) } @@ -1364,29 +1504,32 @@ fn generic_simd_intrinsic( } // Alignment of T, must be a constant integer value: - let alignment_ty = Type::i32(bx.cx); - let alignment = C_i32(bx.cx, bx.cx.align_of(in_elem).abi() as i32); + let alignment_ty = bx.type_i32(); + let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32); // Truncate the mask vector to a vector of i1s: let (mask, mask_ty) = { - let i1 = Type::i1(bx.cx); - let i1xn = Type::vector(i1, in_len as u64); + let i1 = bx.type_i1(); + let i1xn = bx.type_vector(i1, in_len as u64); (bx.trunc(args[2].immediate(), i1xn), i1xn) }; // Type of the vector of pointers: - let llvm_pointer_vec_ty = llvm_vector_ty(bx.cx, underlying_ty, in_len, pointer_count); + let llvm_pointer_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count); let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count); // Type of the vector of elements: - let llvm_elem_vec_ty = llvm_vector_ty(bx.cx, underlying_ty, in_len, pointer_count - 1); + let llvm_elem_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count - 1); let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1); let llvm_intrinsic = format!("llvm.masked.gather.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str); - let f = declare::declare_cfn(bx.cx, &llvm_intrinsic, - Type::func(&[llvm_pointer_vec_ty, alignment_ty, mask_ty, - llvm_elem_vec_ty], llvm_elem_vec_ty)); + let f = bx.declare_cfn(&llvm_intrinsic, + bx.type_func(&[ + llvm_pointer_vec_ty, + alignment_ty, + mask_ty, + llvm_elem_vec_ty], llvm_elem_vec_ty)); llvm::SetUnnamedAddr(f, false); let v = bx.call(f, &[args[1].immediate(), alignment, mask, args[0].immediate()], None); @@ -1461,30 +1604,30 @@ fn generic_simd_intrinsic( } // Alignment of T, must be a constant integer value: - let alignment_ty = Type::i32(bx.cx); - let alignment = C_i32(bx.cx, bx.cx.align_of(in_elem).abi() as i32); + let alignment_ty = bx.type_i32(); + let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32); // Truncate the mask vector to a vector of i1s: let (mask, mask_ty) = { - let i1 = Type::i1(bx.cx); - let i1xn = 
Type::vector(i1, in_len as u64); + let i1 = bx.type_i1(); + let i1xn = bx.type_vector(i1, in_len as u64); (bx.trunc(args[2].immediate(), i1xn), i1xn) }; - let ret_t = Type::void(bx.cx); + let ret_t = bx.type_void(); // Type of the vector of pointers: - let llvm_pointer_vec_ty = llvm_vector_ty(bx.cx, underlying_ty, in_len, pointer_count); + let llvm_pointer_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count); let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count); // Type of the vector of elements: - let llvm_elem_vec_ty = llvm_vector_ty(bx.cx, underlying_ty, in_len, pointer_count - 1); + let llvm_elem_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count - 1); let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1); let llvm_intrinsic = format!("llvm.masked.scatter.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str); - let f = declare::declare_cfn(bx.cx, &llvm_intrinsic, - Type::func(&[llvm_elem_vec_ty, + let f = bx.declare_cfn(&llvm_intrinsic, + bx.type_func(&[llvm_elem_vec_ty, llvm_pointer_vec_ty, alignment_ty, mask_ty], ret_t)); @@ -1524,7 +1667,7 @@ fn generic_simd_intrinsic( // code is generated // * if the accumulator of the fmul isn't 1, incorrect // code is generated - match const_get_real(acc) { + match bx.const_get_real(acc) { None => return_error!("accumulator of {} is not a constant", $name), Some((v, loses_info)) => { if $name.contains("mul") && v != 1.0_f64 { @@ -1540,8 +1683,8 @@ fn generic_simd_intrinsic( } else { // unordered arithmetic reductions do not: match f.bit_width() { - 32 => C_undef(Type::f32(bx.cx)), - 64 => C_undef(Type::f64(bx.cx)), + 32 => bx.const_undef(bx.type_f32()), + 64 => bx.const_undef(bx.type_f64()), v => { return_error!(r#" unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#, @@ -1618,8 +1761,8 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#, } // boolean reductions operate on vectors of i1s: - let i1 = Type::i1(bx.cx); - let i1xn = Type::vector(i1, in_len as u64); + let i1 = bx.type_i1(); + let i1xn = bx.type_vector(i1, in_len as u64); bx.trunc(args[0].immediate(), i1xn) }; return match in_elem.sty { @@ -1629,7 +1772,7 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#, if !$boolean { r } else { - bx.zext(r, Type::bool(bx.cx)) + bx.zext(r, bx.type_bool()) } ) }, diff --git a/src/librustc_codegen_llvm/lib.rs b/src/librustc_codegen_llvm/lib.rs index 63a8ab077e..4f90cb793b 100644 --- a/src/librustc_codegen_llvm/lib.rs +++ b/src/librustc_codegen_llvm/lib.rs @@ -37,11 +37,10 @@ #![feature(static_nobundle)] use back::write::create_target_machine; -use rustc::dep_graph::WorkProduct; use syntax_pos::symbol::Symbol; -#[macro_use] extern crate bitflags; extern crate flate2; +#[macro_use] extern crate bitflags; extern crate libc; #[macro_use] extern crate rustc; extern crate jobserver; @@ -56,6 +55,7 @@ extern crate rustc_incremental; extern crate rustc_llvm; extern crate rustc_platform_intrinsics as intrinsics; extern crate rustc_codegen_utils; +extern crate rustc_codegen_ssa; extern crate rustc_fs_util; #[macro_use] extern crate log; @@ -67,40 +67,38 @@ extern crate cc; // Used to locate MSVC extern crate tempfile; extern crate memmap; -use back::bytecode::RLIB_BYTECODE_EXTENSION; - +use rustc_codegen_ssa::traits::*; +use rustc_codegen_ssa::back::write::{CodegenContext, ModuleConfig}; +use rustc_codegen_ssa::back::lto::{SerializedModule, LtoModuleCodegen, ThinModule}; +use rustc_codegen_ssa::CompiledModule; +use errors::{FatalError, 
Handler}; +use rustc::dep_graph::WorkProduct; +use rustc::util::time_graph::Timeline; +use syntax_pos::symbol::InternedString; +use rustc::mir::mono::Stats; pub use llvm_util::target_features; use std::any::Any; -use std::path::{PathBuf}; -use std::sync::mpsc; -use rustc_data_structures::sync::Lrc; +use std::sync::{mpsc, Arc}; use rustc::dep_graph::DepGraph; -use rustc::hir::def_id::CrateNum; -use rustc::middle::cstore::MetadataLoader; -use rustc::middle::cstore::{NativeLibrary, CrateSource, LibSource}; -use rustc::middle::lang_items::LangItem; +use rustc::middle::allocator::AllocatorKind; +use rustc::middle::cstore::{EncodedMetadata, MetadataLoader}; use rustc::session::{Session, CompileIncomplete}; use rustc::session::config::{OutputFilenames, OutputType, PrintRequest}; use rustc::ty::{self, TyCtxt}; use rustc::util::time_graph; -use rustc::util::nodemap::{FxHashSet, FxHashMap}; use rustc::util::profiling::ProfileCategory; use rustc_mir::monomorphize; +use rustc_codegen_ssa::ModuleCodegen; use rustc_codegen_utils::codegen_backend::CodegenBackend; -use rustc_data_structures::svh::Svh; mod diagnostics; mod back { - pub use rustc_codegen_utils::symbol_names; mod archive; pub mod bytecode; - mod command; - pub mod linker; pub mod link; pub mod lto; - pub mod symbol_export; pub mod write; mod rpath; pub mod wasm; @@ -118,22 +116,111 @@ mod consts; mod context; mod debuginfo; mod declare; -mod glue; mod intrinsic; -pub mod llvm; + +// The following is a work around that replaces `pub mod llvm;` and that fixes issue 53912. +#[path = "llvm/mod.rs"] mod llvm_; pub mod llvm { pub use super::llvm_::*; } + mod llvm_util; mod metadata; -mod meth; -mod mir; mod mono_item; mod type_; mod type_of; mod value; +mod va_arg; +#[derive(Clone)] pub struct LlvmCodegenBackend(()); -impl !Send for LlvmCodegenBackend {} // Llvm is on a per-thread basis -impl !Sync for LlvmCodegenBackend {} +impl ExtraBackendMethods for LlvmCodegenBackend { + fn new_metadata(&self, sess: &Session, mod_name: &str) -> ModuleLlvm { + ModuleLlvm::new(sess, mod_name) + } + fn write_metadata<'b, 'gcx>( + &self, + tcx: TyCtxt<'b, 'gcx, 'gcx>, + metadata: &ModuleLlvm + ) -> EncodedMetadata { + base::write_metadata(tcx, metadata) + } + fn codegen_allocator(&self, tcx: TyCtxt, mods: &ModuleLlvm, kind: AllocatorKind) { + unsafe { allocator::codegen(tcx, mods, kind) } + } + fn compile_codegen_unit<'a, 'tcx: 'a>( + &self, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + cgu_name: InternedString, + ) -> Stats { + base::compile_codegen_unit(tcx, cgu_name) + } + fn target_machine_factory( + &self, + sess: &Session, + find_features: bool + ) -> Arc + Result<&'static mut llvm::TargetMachine, String> + Send + Sync> { + back::write::target_machine_factory(sess, find_features) + } + fn target_cpu<'b>(&self, sess: &'b Session) -> &'b str { + llvm_util::target_cpu(sess) + } +} + +impl WriteBackendMethods for LlvmCodegenBackend { + type Module = ModuleLlvm; + type ModuleBuffer = back::lto::ModuleBuffer; + type Context = llvm::Context; + type TargetMachine = &'static mut llvm::TargetMachine; + type ThinData = back::lto::ThinData; + type ThinBuffer = back::lto::ThinBuffer; + fn print_pass_timings(&self) { + unsafe { llvm::LLVMRustPrintPassTimings(); } + } + fn run_lto( + cgcx: &CodegenContext, + modules: Vec>, + cached_modules: Vec<(SerializedModule, WorkProduct)>, + timeline: &mut Timeline + ) -> Result<(Vec>, Vec), FatalError> { + back::lto::run(cgcx, modules, cached_modules, timeline) + } + unsafe fn optimize( + cgcx: &CodegenContext, + diag_handler: &Handler, + 
module: &ModuleCodegen, + config: &ModuleConfig, + timeline: &mut Timeline + ) -> Result<(), FatalError> { + back::write::optimize(cgcx, diag_handler, module, config, timeline) + } + unsafe fn optimize_thin( + cgcx: &CodegenContext, + thin: &mut ThinModule, + timeline: &mut Timeline + ) -> Result, FatalError> { + back::lto::optimize_thin_module(thin, cgcx, timeline) + } + unsafe fn codegen( + cgcx: &CodegenContext, + diag_handler: &Handler, + module: ModuleCodegen, + config: &ModuleConfig, + timeline: &mut Timeline + ) -> Result { + back::write::codegen(cgcx, diag_handler, module, config, timeline) + } + fn run_lto_pass_manager( + cgcx: &CodegenContext, + module: &ModuleCodegen, + config: &ModuleConfig, + thin: bool + ) { + back::lto::run_pass_manager(cgcx, module, config, thin) + } +} + +unsafe impl Send for LlvmCodegenBackend {} // Llvm is on a per-thread basis +unsafe impl Sync for LlvmCodegenBackend {} impl LlvmCodegenBackend { pub fn new() -> Box { @@ -194,24 +281,24 @@ impl CodegenBackend for LlvmCodegenBackend { } fn provide(&self, providers: &mut ty::query::Providers) { - back::symbol_names::provide(providers); - back::symbol_export::provide(providers); - base::provide(providers); + rustc_codegen_utils::symbol_names::provide(providers); + rustc_codegen_ssa::back::symbol_export::provide(providers); + rustc_codegen_ssa::base::provide_both(providers); attributes::provide(providers); } fn provide_extern(&self, providers: &mut ty::query::Providers) { - back::symbol_export::provide_extern(providers); - base::provide_extern(providers); + rustc_codegen_ssa::back::symbol_export::provide_extern(providers); + rustc_codegen_ssa::base::provide_both(providers); attributes::provide_extern(providers); } - fn codegen_crate<'a, 'tcx>( + fn codegen_crate<'b, 'tcx>( &self, - tcx: TyCtxt<'a, 'tcx, 'tcx>, + tcx: TyCtxt<'b, 'tcx, 'tcx>, rx: mpsc::Receiver> ) -> Box { - box base::codegen_crate(tcx, rx) + box rustc_codegen_ssa::base::codegen_crate(LlvmCodegenBackend(()), tcx, rx) } fn join_codegen_and_link( @@ -222,12 +309,13 @@ impl CodegenBackend for LlvmCodegenBackend { outputs: &OutputFilenames, ) -> Result<(), CompileIncomplete>{ use rustc::util::common::time; - let (ongoing_codegen, work_products) = - ongoing_codegen.downcast::<::back::write::OngoingCodegen>() + let (codegen_results, work_products) = + ongoing_codegen.downcast:: + >() .expect("Expected LlvmCodegenBackend's OngoingCodegen, found Box") .join(sess); if sess.opts.debugging_opts.incremental_info { - back::write::dump_incremental_data(&ongoing_codegen); + rustc_codegen_ssa::back::write::dump_incremental_data(&codegen_results); } time(sess, @@ -245,14 +333,14 @@ impl CodegenBackend for LlvmCodegenBackend { // This should produce either a finished executable or library. 
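The `downcast` in `join_codegen_and_link` above is the standard `Box<dyn Any>` recovery pattern; a minimal sketch with hypothetical stand-in types (not the real `OngoingCodegen` or `CodegenResults`) of how the type-erased box is turned back into its concrete type:

```rust
use std::any::Any;

// Hypothetical stand-in for the boxed results handed across the backend
// boundary; the real types live in rustc_codegen_ssa.
struct OngoingWork {
    modules: usize,
}

fn join(boxed: Box<dyn Any>) -> usize {
    // `downcast` returns the concrete box on success and the original
    // type-erased box on failure, so a mismatch can be reported.
    match boxed.downcast::<OngoingWork>() {
        Ok(work) => work.modules,
        Err(_) => panic!("expected OngoingWork, found something else"),
    }
}

fn main() {
    let erased: Box<dyn Any> = Box::new(OngoingWork { modules: 3 });
    assert_eq!(join(erased), 3);
}
```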
sess.profiler(|p| p.start_activity(ProfileCategory::Linking)); time(sess, "linking", || { - back::link::link_binary(sess, &ongoing_codegen, - outputs, &ongoing_codegen.crate_name.as_str()); + back::link::link_binary(sess, &codegen_results, + outputs, &codegen_results.crate_name.as_str()); }); sess.profiler(|p| p.end_activity(ProfileCategory::Linking)); // Now that we won't touch anything in the incremental compilation directory // any more, we can finalize it (which involves renaming it) - rustc_incremental::finalize_session_directory(sess, ongoing_codegen.crate_hash); + rustc_incremental::finalize_session_directory(sess, codegen_results.crate_hash); Ok(()) } @@ -264,73 +352,7 @@ pub fn __rustc_codegen_backend() -> Box { LlvmCodegenBackend::new() } -struct ModuleCodegen { - /// The name of the module. When the crate may be saved between - /// compilations, incremental compilation requires that name be - /// unique amongst **all** crates. Therefore, it should contain - /// something unique to this crate (e.g., a module path) as well - /// as the crate name and disambiguator. - /// We currently generate these names via CodegenUnit::build_cgu_name(). - name: String, - module_llvm: ModuleLlvm, - kind: ModuleKind, -} - -struct CachedModuleCodegen { - name: String, - source: WorkProduct, -} - -#[derive(Copy, Clone, Debug, PartialEq)] -enum ModuleKind { - Regular, - Metadata, - Allocator, -} - -impl ModuleCodegen { - fn into_compiled_module(self, - emit_obj: bool, - emit_bc: bool, - emit_bc_compressed: bool, - outputs: &OutputFilenames) -> CompiledModule { - let object = if emit_obj { - Some(outputs.temp_path(OutputType::Object, Some(&self.name))) - } else { - None - }; - let bytecode = if emit_bc { - Some(outputs.temp_path(OutputType::Bitcode, Some(&self.name))) - } else { - None - }; - let bytecode_compressed = if emit_bc_compressed { - Some(outputs.temp_path(OutputType::Bitcode, Some(&self.name)) - .with_extension(RLIB_BYTECODE_EXTENSION)) - } else { - None - }; - - CompiledModule { - name: self.name.clone(), - kind: self.kind, - object, - bytecode, - bytecode_compressed, - } - } -} - -#[derive(Debug)] -struct CompiledModule { - name: String, - kind: ModuleKind, - object: Option, - bytecode: Option, - bytecode_compressed: Option, -} - -struct ModuleLlvm { +pub struct ModuleLlvm { llcx: &'static mut llvm::Context, llmod_raw: *const llvm::Module, tm: &'static mut llvm::TargetMachine, @@ -369,35 +391,4 @@ impl Drop for ModuleLlvm { } } -struct CodegenResults { - crate_name: Symbol, - modules: Vec, - allocator_module: Option, - metadata_module: CompiledModule, - crate_hash: Svh, - metadata: rustc::middle::cstore::EncodedMetadata, - windows_subsystem: Option, - linker_info: back::linker::LinkerInfo, - crate_info: CrateInfo, -} - -/// Misc info we load from metadata to persist beyond the tcx -struct CrateInfo { - panic_runtime: Option, - compiler_builtins: Option, - profiler_runtime: Option, - sanitizer_runtime: Option, - is_no_builtins: FxHashSet, - native_libraries: FxHashMap>>, - crate_name: FxHashMap, - used_libraries: Lrc>, - link_args: Lrc>, - used_crate_source: FxHashMap>, - used_crates_static: Vec<(CrateNum, LibSource)>, - used_crates_dynamic: Vec<(CrateNum, LibSource)>, - wasm_imports: FxHashMap, - lang_item_to_crate: FxHashMap, - missing_lang_items: FxHashMap>, -} - __build_diagnostic_array! 
{ librustc_codegen_llvm, DIAGNOSTICS } diff --git a/src/librustc_codegen_llvm/llvm/archive_ro.rs b/src/librustc_codegen_llvm/llvm/archive_ro.rs index 2a77f256e3..d5c73fecf8 100644 --- a/src/librustc_codegen_llvm/llvm/archive_ro.rs +++ b/src/librustc_codegen_llvm/llvm/archive_ro.rs @@ -10,10 +10,10 @@ //! A wrapper around LLVM's archive (.a) code -use std::ffi::CString; use std::path::Path; use std::slice; use std::str; +use rustc_fs_util::path_to_c_string; pub struct ArchiveRO { pub raw: &'static mut super::Archive, @@ -38,24 +38,12 @@ impl ArchiveRO { /// raised. pub fn open(dst: &Path) -> Result { return unsafe { - let s = path2cstr(dst); + let s = path_to_c_string(dst); let ar = super::LLVMRustOpenArchive(s.as_ptr()).ok_or_else(|| { super::last_error().unwrap_or_else(|| "failed to open archive".to_owned()) })?; Ok(ArchiveRO { raw: ar }) }; - - #[cfg(unix)] - fn path2cstr(p: &Path) -> CString { - use std::os::unix::prelude::*; - use std::ffi::OsStr; - let p: &OsStr = p.as_ref(); - CString::new(p.as_bytes()).unwrap() - } - #[cfg(windows)] - fn path2cstr(p: &Path) -> CString { - CString::new(p.to_str().unwrap()).unwrap() - } } pub fn iter(&self) -> Iter { diff --git a/src/librustc_codegen_llvm/llvm/ffi.rs b/src/librustc_codegen_llvm/llvm/ffi.rs index 0b98fa4eaf..127759a432 100644 --- a/src/librustc_codegen_llvm/llvm/ffi.rs +++ b/src/librustc_codegen_llvm/llvm/ffi.rs @@ -19,6 +19,8 @@ use libc::{c_uint, c_int, size_t, c_char}; use libc::{c_ulonglong, c_void}; use std::marker::PhantomData; +use syntax; +use rustc_codegen_ssa; use super::RustString; @@ -141,6 +143,23 @@ pub enum IntPredicate { IntSLE = 41, } +impl IntPredicate { + pub fn from_generic(intpre: rustc_codegen_ssa::common::IntPredicate) -> Self { + match intpre { + rustc_codegen_ssa::common::IntPredicate::IntEQ => IntPredicate::IntEQ, + rustc_codegen_ssa::common::IntPredicate::IntNE => IntPredicate::IntNE, + rustc_codegen_ssa::common::IntPredicate::IntUGT => IntPredicate::IntUGT, + rustc_codegen_ssa::common::IntPredicate::IntUGE => IntPredicate::IntUGE, + rustc_codegen_ssa::common::IntPredicate::IntULT => IntPredicate::IntULT, + rustc_codegen_ssa::common::IntPredicate::IntULE => IntPredicate::IntULE, + rustc_codegen_ssa::common::IntPredicate::IntSGT => IntPredicate::IntSGT, + rustc_codegen_ssa::common::IntPredicate::IntSGE => IntPredicate::IntSGE, + rustc_codegen_ssa::common::IntPredicate::IntSLT => IntPredicate::IntSLT, + rustc_codegen_ssa::common::IntPredicate::IntSLE => IntPredicate::IntSLE, + } + } +} + /// LLVMRealPredicate #[derive(Copy, Clone)] #[repr(C)] @@ -163,6 +182,31 @@ pub enum RealPredicate { RealPredicateTrue = 15, } +impl RealPredicate { + pub fn from_generic(realpred: rustc_codegen_ssa::common::RealPredicate) -> Self { + match realpred { + rustc_codegen_ssa::common::RealPredicate::RealPredicateFalse => + RealPredicate::RealPredicateFalse, + rustc_codegen_ssa::common::RealPredicate::RealOEQ => RealPredicate::RealOEQ, + rustc_codegen_ssa::common::RealPredicate::RealOGT => RealPredicate::RealOGT, + rustc_codegen_ssa::common::RealPredicate::RealOGE => RealPredicate::RealOGE, + rustc_codegen_ssa::common::RealPredicate::RealOLT => RealPredicate::RealOLT, + rustc_codegen_ssa::common::RealPredicate::RealOLE => RealPredicate::RealOLE, + rustc_codegen_ssa::common::RealPredicate::RealONE => RealPredicate::RealONE, + rustc_codegen_ssa::common::RealPredicate::RealORD => RealPredicate::RealORD, + rustc_codegen_ssa::common::RealPredicate::RealUNO => RealPredicate::RealUNO, + rustc_codegen_ssa::common::RealPredicate::RealUEQ => 
RealPredicate::RealUEQ, + rustc_codegen_ssa::common::RealPredicate::RealUGT => RealPredicate::RealUGT, + rustc_codegen_ssa::common::RealPredicate::RealUGE => RealPredicate::RealUGE, + rustc_codegen_ssa::common::RealPredicate::RealULT => RealPredicate::RealULT, + rustc_codegen_ssa::common::RealPredicate::RealULE => RealPredicate::RealULE, + rustc_codegen_ssa::common::RealPredicate::RealUNE => RealPredicate::RealUNE, + rustc_codegen_ssa::common::RealPredicate::RealPredicateTrue => + RealPredicate::RealPredicateTrue + } + } +} + /// LLVMTypeKind #[derive(Copy, Clone, PartialEq, Debug)] #[repr(C)] @@ -186,6 +230,30 @@ pub enum TypeKind { Token = 16, } +impl TypeKind { + pub fn to_generic(self) -> rustc_codegen_ssa::common::TypeKind { + match self { + TypeKind::Void => rustc_codegen_ssa::common::TypeKind::Void, + TypeKind::Half => rustc_codegen_ssa::common::TypeKind::Half, + TypeKind::Float => rustc_codegen_ssa::common::TypeKind::Float, + TypeKind::Double => rustc_codegen_ssa::common::TypeKind::Double, + TypeKind::X86_FP80 => rustc_codegen_ssa::common::TypeKind::X86_FP80, + TypeKind::FP128 => rustc_codegen_ssa::common::TypeKind::FP128, + TypeKind::PPC_FP128 => rustc_codegen_ssa::common::TypeKind::PPC_FP128, + TypeKind::Label => rustc_codegen_ssa::common::TypeKind::Label, + TypeKind::Integer => rustc_codegen_ssa::common::TypeKind::Integer, + TypeKind::Function => rustc_codegen_ssa::common::TypeKind::Function, + TypeKind::Struct => rustc_codegen_ssa::common::TypeKind::Struct, + TypeKind::Array => rustc_codegen_ssa::common::TypeKind::Array, + TypeKind::Pointer => rustc_codegen_ssa::common::TypeKind::Pointer, + TypeKind::Vector => rustc_codegen_ssa::common::TypeKind::Vector, + TypeKind::Metadata => rustc_codegen_ssa::common::TypeKind::Metadata, + TypeKind::X86_MMX => rustc_codegen_ssa::common::TypeKind::X86_MMX, + TypeKind::Token => rustc_codegen_ssa::common::TypeKind::Token, + } + } +} + /// LLVMAtomicRmwBinOp #[derive(Copy, Clone)] #[repr(C)] @@ -203,6 +271,24 @@ pub enum AtomicRmwBinOp { AtomicUMin = 10, } +impl AtomicRmwBinOp { + pub fn from_generic(op: rustc_codegen_ssa::common::AtomicRmwBinOp) -> Self { + match op { + rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicXchg => AtomicRmwBinOp::AtomicXchg, + rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicAdd => AtomicRmwBinOp::AtomicAdd, + rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicSub => AtomicRmwBinOp::AtomicSub, + rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicAnd => AtomicRmwBinOp::AtomicAnd, + rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicNand => AtomicRmwBinOp::AtomicNand, + rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicOr => AtomicRmwBinOp::AtomicOr, + rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicXor => AtomicRmwBinOp::AtomicXor, + rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicMax => AtomicRmwBinOp::AtomicMax, + rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicMin => AtomicRmwBinOp::AtomicMin, + rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicUMax => AtomicRmwBinOp::AtomicUMax, + rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicUMin => AtomicRmwBinOp::AtomicUMin + } + } +} + /// LLVMAtomicOrdering #[derive(Copy, Clone)] #[repr(C)] @@ -218,6 +304,23 @@ pub enum AtomicOrdering { SequentiallyConsistent = 7, } +impl AtomicOrdering { + pub fn from_generic(ao: rustc_codegen_ssa::common::AtomicOrdering) -> Self { + match ao { + rustc_codegen_ssa::common::AtomicOrdering::NotAtomic => AtomicOrdering::NotAtomic, + rustc_codegen_ssa::common::AtomicOrdering::Unordered => AtomicOrdering::Unordered, + 
rustc_codegen_ssa::common::AtomicOrdering::Monotonic => AtomicOrdering::Monotonic, + rustc_codegen_ssa::common::AtomicOrdering::Acquire => AtomicOrdering::Acquire, + rustc_codegen_ssa::common::AtomicOrdering::Release => AtomicOrdering::Release, + rustc_codegen_ssa::common::AtomicOrdering::AcquireRelease => + AtomicOrdering::AcquireRelease, + rustc_codegen_ssa::common::AtomicOrdering::SequentiallyConsistent => + AtomicOrdering::SequentiallyConsistent + } + } +} + + /// LLVMRustSynchronizationScope #[derive(Copy, Clone)] #[repr(C)] @@ -229,6 +332,18 @@ pub enum SynchronizationScope { CrossThread, } +impl SynchronizationScope { + pub fn from_generic(sc: rustc_codegen_ssa::common::SynchronizationScope) -> Self { + match sc { + rustc_codegen_ssa::common::SynchronizationScope::Other => SynchronizationScope::Other, + rustc_codegen_ssa::common::SynchronizationScope::SingleThread => + SynchronizationScope::SingleThread, + rustc_codegen_ssa::common::SynchronizationScope::CrossThread => + SynchronizationScope::CrossThread, + } + } +} + /// LLVMRustFileType #[derive(Copy, Clone)] #[repr(C)] @@ -269,6 +384,15 @@ pub enum AsmDialect { Intel, } +impl AsmDialect { + pub fn from_generic(asm: syntax::ast::AsmDialect) -> Self { + match asm { + syntax::ast::AsmDialect::Att => AsmDialect::Att, + syntax::ast::AsmDialect::Intel => AsmDialect::Intel + } + } +} + /// LLVMRustCodeGenOptLevel #[derive(Copy, Clone, PartialEq)] #[repr(C)] @@ -998,6 +1122,22 @@ extern "C" { Bundle: Option<&OperandBundleDef<'a>>, Name: *const c_char) -> &'a Value; + pub fn LLVMRustBuildMemCpy(B: &Builder<'a>, + Dst: &'a Value, + DstAlign: c_uint, + Src: &'a Value, + SrcAlign: c_uint, + Size: &'a Value, + IsVolatile: bool) + -> &'a Value; + pub fn LLVMRustBuildMemMove(B: &Builder<'a>, + Dst: &'a Value, + DstAlign: c_uint, + Src: &'a Value, + SrcAlign: c_uint, + Size: &'a Value, + IsVolatile: bool) + -> &'a Value; pub fn LLVMBuildSelect(B: &Builder<'a>, If: &'a Value, Then: &'a Value, @@ -1041,42 +1181,42 @@ extern "C" { pub fn LLVMRustBuildVectorReduceFAdd(B: &Builder<'a>, Acc: &'a Value, Src: &'a Value) - -> Option<&'a Value>; + -> &'a Value; pub fn LLVMRustBuildVectorReduceFMul(B: &Builder<'a>, Acc: &'a Value, Src: &'a Value) - -> Option<&'a Value>; + -> &'a Value; pub fn LLVMRustBuildVectorReduceAdd(B: &Builder<'a>, Src: &'a Value) - -> Option<&'a Value>; + -> &'a Value; pub fn LLVMRustBuildVectorReduceMul(B: &Builder<'a>, Src: &'a Value) - -> Option<&'a Value>; + -> &'a Value; pub fn LLVMRustBuildVectorReduceAnd(B: &Builder<'a>, Src: &'a Value) - -> Option<&'a Value>; + -> &'a Value; pub fn LLVMRustBuildVectorReduceOr(B: &Builder<'a>, Src: &'a Value) - -> Option<&'a Value>; + -> &'a Value; pub fn LLVMRustBuildVectorReduceXor(B: &Builder<'a>, Src: &'a Value) - -> Option<&'a Value>; + -> &'a Value; pub fn LLVMRustBuildVectorReduceMin(B: &Builder<'a>, Src: &'a Value, IsSigned: bool) - -> Option<&'a Value>; + -> &'a Value; pub fn LLVMRustBuildVectorReduceMax(B: &Builder<'a>, Src: &'a Value, IsSigned: bool) - -> Option<&'a Value>; + -> &'a Value; pub fn LLVMRustBuildVectorReduceFMin(B: &Builder<'a>, Src: &'a Value, IsNaN: bool) - -> Option<&'a Value>; + -> &'a Value; pub fn LLVMRustBuildVectorReduceFMax(B: &Builder<'a>, Src: &'a Value, IsNaN: bool) - -> Option<&'a Value>; + -> &'a Value; pub fn LLVMRustBuildMinNum( B: &Builder<'a>, @@ -1157,7 +1297,7 @@ extern "C" { RunInliner: Bool); pub fn LLVMRustPassManagerBuilderPopulateThinLTOPassManager( PMB: &PassManagerBuilder, - PM: &PassManager) -> bool; + PM: &PassManager); // Stuff that's in 
rustllvm/ because it's not upstream yet. @@ -1218,6 +1358,7 @@ extern "C" { pub fn LLVMRustDebugMetadataVersion() -> u32; pub fn LLVMRustVersionMajor() -> u32; pub fn LLVMRustVersionMinor() -> u32; + pub fn LLVMRustIsRustLLVM() -> bool; pub fn LLVMRustAddModuleFlag(M: &Module, name: *const c_char, value: u32); @@ -1307,6 +1448,19 @@ extern "C" { Ty: &'a DIType) -> &'a DIDerivedType; + pub fn LLVMRustDIBuilderCreateVariantMemberType(Builder: &DIBuilder<'a>, + Scope: &'a DIScope, + Name: *const c_char, + File: &'a DIFile, + LineNumber: c_uint, + SizeInBits: u64, + AlignInBits: u32, + OffsetInBits: u64, + Discriminant: Option<&'a Value>, + Flags: DIFlags, + Ty: &'a DIType) + -> &'a DIType; + pub fn LLVMRustDIBuilderCreateLexicalBlock(Builder: &DIBuilder<'a>, Scope: &'a DIScope, File: &'a DIFile, @@ -1384,7 +1538,8 @@ extern "C" { SizeInBits: u64, AlignInBits: u32, Elements: &'a DIArray, - ClassType: &'a DIType) + ClassType: &'a DIType, + IsFixed: bool) -> &'a DIType; pub fn LLVMRustDIBuilderCreateUnionType(Builder: &DIBuilder<'a>, @@ -1400,6 +1555,19 @@ extern "C" { UniqueId: *const c_char) -> &'a DIType; + pub fn LLVMRustDIBuilderCreateVariantPart(Builder: &DIBuilder<'a>, + Scope: &'a DIScope, + Name: *const c_char, + File: &'a DIFile, + LineNo: c_uint, + SizeInBits: u64, + AlignInBits: u32, + Flags: DIFlags, + Discriminator: Option<&'a DIDerivedType>, + Elements: &'a DIArray, + UniqueId: *const c_char) + -> &'a DIDerivedType; + pub fn LLVMSetUnnamedAddr(GlobalVar: &Value, UnnamedAddr: Bool); pub fn LLVMRustDIBuilderCreateTemplateTypeParameter(Builder: &DIBuilder<'a>, @@ -1419,9 +1587,10 @@ extern "C" { LineNo: c_uint) -> &'a DINameSpace; - pub fn LLVMRustDICompositeTypeSetTypeArray(Builder: &DIBuilder<'a>, - CompositeType: &'a DIType, - TypeArray: &'a DIArray); + pub fn LLVMRustDICompositeTypeReplaceArrays(Builder: &DIBuilder<'a>, + CompositeType: &'a DIType, + Elements: Option<&'a DIArray>, + Params: Option<&'a DIArray>); pub fn LLVMRustDIBuilderCreateDebugLocation(Context: &'a Context, @@ -1599,8 +1768,6 @@ extern "C" { pub fn LLVMRustModuleBufferFree(p: &'static mut ModuleBuffer); pub fn LLVMRustModuleCost(M: &Module) -> u64; - pub fn LLVMRustThinLTOAvailable() -> bool; - pub fn LLVMRustPGOAvailable() -> bool; pub fn LLVMRustThinLTOBufferCreate(M: &Module) -> &'static mut ThinLTOBuffer; pub fn LLVMRustThinLTOBufferFree(M: &'static mut ThinLTOBuffer); pub fn LLVMRustThinLTOBufferPtr(M: &ThinLTOBuffer) -> *const c_char; diff --git a/src/librustc_codegen_llvm/llvm/mod.rs b/src/librustc_codegen_llvm/llvm/mod.rs index 4343c8c184..fbd5192a63 100644 --- a/src/librustc_codegen_llvm/llvm/mod.rs +++ b/src/librustc_codegen_llvm/llvm/mod.rs @@ -190,7 +190,7 @@ impl ObjectFile { pub fn new(llmb: &'static mut MemoryBuffer) -> Option { unsafe { let llof = LLVMCreateObjectFile(llmb)?; - Some(ObjectFile { llof: llof }) + Some(ObjectFile { llof }) } } } diff --git a/src/librustc_codegen_llvm/llvm_util.rs b/src/librustc_codegen_llvm/llvm_util.rs index 0a80fdddbf..fdb6373bea 100644 --- a/src/librustc_codegen_llvm/llvm_util.rs +++ b/src/librustc_codegen_llvm/llvm_util.rs @@ -70,6 +70,9 @@ unsafe fn configure_llvm(sess: &Session) { if sess.opts.debugging_opts.disable_instrumentation_preinliner { add("-disable-preinline"); } + if llvm::LLVMRustIsRustLLVM() { + add("-mergefunc-use-aliases"); + } for arg in &sess.opts.cg.llvm_args { add(&(*arg)); @@ -184,7 +187,7 @@ const WASM_WHITELIST: &[(&str, Option<&str>)] = &[ ]; /// When rustdoc is running, provide a list of all known features so that all their respective 
-/// primtives may be documented. +/// primitives may be documented. /// /// IMPORTANT: If you're adding another whitelist to the above lists, make sure to add it to this /// iterator! @@ -243,7 +246,8 @@ pub fn target_feature_whitelist(sess: &Session) "hexagon" => HEXAGON_WHITELIST, "mips" | "mips64" => MIPS_WHITELIST, "powerpc" | "powerpc64" => POWERPC_WHITELIST, - "wasm32" => WASM_WHITELIST, + // wasm32 on emscripten does not support these target features + "wasm32" if !sess.target.target.options.is_like_emscripten => WASM_WHITELIST, _ => &[], } } @@ -256,6 +260,10 @@ pub fn print_version() { } } +pub fn get_major_version() -> u32 { + unsafe { llvm::LLVMRustVersionMajor() } +} + pub fn print_passes() { // Can be called without initializing LLVM unsafe { llvm::LLVMRustPrintPasses(); } diff --git a/src/librustc_codegen_llvm/metadata.rs b/src/librustc_codegen_llvm/metadata.rs index 7752465d88..5605f64c2e 100644 --- a/src/librustc_codegen_llvm/metadata.rs +++ b/src/librustc_codegen_llvm/metadata.rs @@ -18,7 +18,7 @@ use rustc_data_structures::owning_ref::OwningRef; use std::path::Path; use std::ptr; use std::slice; -use rustc_fs_util::path2cstr; +use rustc_fs_util::path_to_c_string; pub use rustc_data_structures::sync::MetadataRef; @@ -57,7 +57,7 @@ impl MetadataLoader for LlvmMetadataLoader { filename: &Path) -> Result { unsafe { - let buf = path2cstr(filename); + let buf = path_to_c_string(filename); let mb = llvm::LLVMRustCreateMemoryBufferWithContentsOfFile(buf.as_ptr()) .ok_or_else(|| format!("error reading library: '{}'", filename.display()))?; let of = ObjectFile::new(mb) diff --git a/src/librustc_codegen_llvm/mir/constant.rs b/src/librustc_codegen_llvm/mir/constant.rs deleted file mode 100644 index 9f0f744389..0000000000 --- a/src/librustc_codegen_llvm/mir/constant.rs +++ /dev/null @@ -1,225 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use llvm; -use rustc::mir::interpret::{ConstEvalErr, read_target_uint}; -use rustc_mir::const_eval::const_field; -use rustc::hir::def_id::DefId; -use rustc::mir; -use rustc_data_structures::indexed_vec::Idx; -use rustc_data_structures::sync::Lrc; -use rustc::mir::interpret::{GlobalId, Pointer, Scalar, Allocation, ConstValue, AllocType}; -use rustc::ty::{self, Ty}; -use rustc::ty::layout::{self, HasDataLayout, LayoutOf, Size}; -use builder::Builder; -use common::{CodegenCx}; -use common::{C_bytes, C_struct, C_uint_big, C_undef, C_usize}; -use consts; -use type_of::LayoutLlvmExt; -use type_::Type; -use syntax::ast::Mutability; -use syntax::source_map::Span; -use value::Value; - -use super::super::callee; -use super::FunctionCx; - -pub fn scalar_to_llvm( - cx: &CodegenCx<'ll, '_>, - cv: Scalar, - layout: &layout::Scalar, - llty: &'ll Type, -) -> &'ll Value { - let bitsize = if layout.is_bool() { 1 } else { layout.value.size(cx).bits() }; - match cv { - Scalar::Bits { size: 0, .. 
} => { - assert_eq!(0, layout.value.size(cx).bytes()); - C_undef(Type::ix(cx, 0)) - }, - Scalar::Bits { bits, size } => { - assert_eq!(size as u64, layout.value.size(cx).bytes()); - let llval = C_uint_big(Type::ix(cx, bitsize), bits); - if layout.value == layout::Pointer { - unsafe { llvm::LLVMConstIntToPtr(llval, llty) } - } else { - consts::bitcast(llval, llty) - } - }, - Scalar::Ptr(ptr) => { - let alloc_type = cx.tcx.alloc_map.lock().get(ptr.alloc_id); - let base_addr = match alloc_type { - Some(AllocType::Memory(alloc)) => { - let init = const_alloc_to_llvm(cx, alloc); - if alloc.mutability == Mutability::Mutable { - consts::addr_of_mut(cx, init, alloc.align, None) - } else { - consts::addr_of(cx, init, alloc.align, None) - } - } - Some(AllocType::Function(fn_instance)) => { - callee::get_fn(cx, fn_instance) - } - Some(AllocType::Static(def_id)) => { - assert!(cx.tcx.is_static(def_id).is_some()); - consts::get_static(cx, def_id) - } - None => bug!("missing allocation {:?}", ptr.alloc_id), - }; - let llval = unsafe { llvm::LLVMConstInBoundsGEP( - consts::bitcast(base_addr, Type::i8p(cx)), - &C_usize(cx, ptr.offset.bytes()), - 1, - ) }; - if layout.value != layout::Pointer { - unsafe { llvm::LLVMConstPtrToInt(llval, llty) } - } else { - consts::bitcast(llval, llty) - } - } - } -} - -pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll Value { - let mut llvals = Vec::with_capacity(alloc.relocations.len() + 1); - let layout = cx.data_layout(); - let pointer_size = layout.pointer_size.bytes() as usize; - - let mut next_offset = 0; - for &(offset, ((), alloc_id)) in alloc.relocations.iter() { - let offset = offset.bytes(); - assert_eq!(offset as usize as u64, offset); - let offset = offset as usize; - if offset > next_offset { - llvals.push(C_bytes(cx, &alloc.bytes[next_offset..offset])); - } - let ptr_offset = read_target_uint( - layout.endian, - &alloc.bytes[offset..(offset + pointer_size)], - ).expect("const_alloc_to_llvm: could not read relocation pointer") as u64; - llvals.push(scalar_to_llvm( - cx, - Pointer::new(alloc_id, Size::from_bytes(ptr_offset)).into(), - &layout::Scalar { - value: layout::Primitive::Pointer, - valid_range: 0..=!0 - }, - Type::i8p(cx) - )); - next_offset = offset + pointer_size; - } - if alloc.bytes.len() >= next_offset { - llvals.push(C_bytes(cx, &alloc.bytes[next_offset ..])); - } - - C_struct(cx, &llvals, true) -} - -pub fn codegen_static_initializer( - cx: &CodegenCx<'ll, 'tcx>, - def_id: DefId, -) -> Result<(&'ll Value, &'tcx Allocation), Lrc>> { - let instance = ty::Instance::mono(cx.tcx, def_id); - let cid = GlobalId { - instance, - promoted: None, - }; - let param_env = ty::ParamEnv::reveal_all(); - let static_ = cx.tcx.const_eval(param_env.and(cid))?; - - let alloc = match static_.val { - ConstValue::ByRef(_, alloc, n) if n.bytes() == 0 => alloc, - _ => bug!("static const eval returned {:#?}", static_), - }; - Ok((const_alloc_to_llvm(cx, alloc), alloc)) -} - -impl FunctionCx<'a, 'll, 'tcx> { - fn fully_evaluate( - &mut self, - bx: &Builder<'a, 'll, 'tcx>, - constant: &'tcx ty::Const<'tcx>, - ) -> Result<&'tcx ty::Const<'tcx>, Lrc>> { - match constant.val { - ConstValue::Unevaluated(def_id, ref substs) => { - let tcx = bx.tcx(); - let param_env = ty::ParamEnv::reveal_all(); - let instance = ty::Instance::resolve(tcx, param_env, def_id, substs).unwrap(); - let cid = GlobalId { - instance, - promoted: None, - }; - tcx.const_eval(param_env.and(cid)) - }, - _ => Ok(constant), - } - } - - pub fn eval_mir_constant( - &mut self, - 
bx: &Builder<'a, 'll, 'tcx>, - constant: &mir::Constant<'tcx>, - ) -> Result<&'tcx ty::Const<'tcx>, Lrc>> { - let c = self.monomorphize(&constant.literal); - self.fully_evaluate(bx, c) - } - - /// process constant containing SIMD shuffle indices - pub fn simd_shuffle_indices( - &mut self, - bx: &Builder<'a, 'll, 'tcx>, - span: Span, - ty: Ty<'tcx>, - constant: Result<&'tcx ty::Const<'tcx>, Lrc>>, - ) -> (&'ll Value, Ty<'tcx>) { - constant - .and_then(|c| { - let field_ty = c.ty.builtin_index().unwrap(); - let fields = match c.ty.sty { - ty::Array(_, n) => n.unwrap_usize(bx.tcx()), - ref other => bug!("invalid simd shuffle type: {}", other), - }; - let values: Result, Lrc<_>> = (0..fields).map(|field| { - let field = const_field( - bx.tcx(), - ty::ParamEnv::reveal_all(), - self.instance, - None, - mir::Field::new(field as usize), - c, - )?; - if let Some(prim) = field.val.try_to_scalar() { - let layout = bx.cx.layout_of(field_ty); - let scalar = match layout.abi { - layout::Abi::Scalar(ref x) => x, - _ => bug!("from_const: invalid ByVal layout: {:#?}", layout) - }; - Ok(scalar_to_llvm( - bx.cx, prim, scalar, - layout.immediate_llvm_type(bx.cx), - )) - } else { - bug!("simd shuffle field {:?}", field) - } - }).collect(); - let llval = C_struct(bx.cx, &values?, false); - Ok((llval, c.ty)) - }) - .unwrap_or_else(|e| { - e.report_as_error( - bx.tcx().at(span), - "could not evaluate shuffle_indices at compile time", - ); - // We've errored, so we don't have to produce working code. - let ty = self.monomorphize(&ty); - let llty = bx.cx.layout_of(ty).llvm_type(bx.cx); - (C_undef(llty), ty) - }) - } -} diff --git a/src/librustc_codegen_llvm/mono_item.rs b/src/librustc_codegen_llvm/mono_item.rs index dab9b147cc..9b2d17d65c 100644 --- a/src/librustc_codegen_llvm/mono_item.rs +++ b/src/librustc_codegen_llvm/mono_item.rs @@ -8,181 +8,82 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -//! Walks the crate looking for items/impl-items/trait-items that have -//! either a `rustc_symbol_name` or `rustc_item_path` attribute and -//! generates an error giving, respectively, the symbol name or -//! item-path. This is used for unit testing the code that generates -//! paths etc in all kinds of annoying scenarios. 
- -use asm; use attributes; use base; -use consts; use context::CodegenCx; -use declare; use llvm; use monomorphize::Instance; use type_of::LayoutLlvmExt; -use rustc::hir; -use rustc::hir::def::Def; use rustc::hir::def_id::{DefId, LOCAL_CRATE}; use rustc::mir::mono::{Linkage, Visibility}; use rustc::ty::TypeFoldable; -use rustc::ty::layout::LayoutOf; -use std::fmt; +use rustc::ty::layout::{LayoutOf, HasTyCtxt}; +use rustc_codegen_ssa::traits::*; pub use rustc::mir::mono::MonoItem; -pub use rustc_mir::monomorphize::item::MonoItemExt as BaseMonoItemExt; +impl PreDefineMethods<'tcx> for CodegenCx<'ll, 'tcx> { + fn predefine_static(&self, + def_id: DefId, + linkage: Linkage, + visibility: Visibility, + symbol_name: &str) { + let instance = Instance::mono(self.tcx, def_id); + let ty = instance.ty(self.tcx); + let llty = self.layout_of(ty).llvm_type(self); -pub trait MonoItemExt<'a, 'tcx>: fmt::Debug + BaseMonoItemExt<'a, 'tcx> { - fn define(&self, cx: &CodegenCx<'a, 'tcx>) { - debug!("BEGIN IMPLEMENTING '{} ({})' in cgu {}", - self.to_string(cx.tcx), - self.to_raw_string(), - cx.codegen_unit.name()); + let g = self.define_global(symbol_name, llty).unwrap_or_else(|| { + self.sess().span_fatal(self.tcx.def_span(def_id), + &format!("symbol `{}` is already defined", symbol_name)) + }); - match *self.as_mono_item() { - MonoItem::Static(def_id) => { - let tcx = cx.tcx; - let is_mutable = match tcx.describe_def(def_id) { - Some(Def::Static(_, is_mutable)) => is_mutable, - Some(other) => { - bug!("Expected Def::Static, found {:?}", other) - } - None => { - bug!("Expected Def::Static for {:?}, found nothing", def_id) - } - }; - consts::codegen_static(&cx, def_id, is_mutable); - } - MonoItem::GlobalAsm(node_id) => { - let item = cx.tcx.hir.expect_item(node_id); - if let hir::ItemKind::GlobalAsm(ref ga) = item.node { - asm::codegen_global_asm(cx, ga); - } else { - span_bug!(item.span, "Mismatch between hir::Item type and MonoItem type") - } - } - MonoItem::Fn(instance) => { - base::codegen_instance(&cx, instance); - } + unsafe { + llvm::LLVMRustSetLinkage(g, base::linkage_to_llvm(linkage)); + llvm::LLVMRustSetVisibility(g, base::visibility_to_llvm(visibility)); } - debug!("END IMPLEMENTING '{} ({})' in cgu {}", - self.to_string(cx.tcx), - self.to_raw_string(), - cx.codegen_unit.name()); + self.instances.borrow_mut().insert(instance, g); } - fn predefine(&self, - cx: &CodegenCx<'a, 'tcx>, - linkage: Linkage, - visibility: Visibility) { - debug!("BEGIN PREDEFINING '{} ({})' in cgu {}", - self.to_string(cx.tcx), - self.to_raw_string(), - cx.codegen_unit.name()); - - let symbol_name = self.symbol_name(cx.tcx).as_str(); - - debug!("symbol {}", &symbol_name); - - match *self.as_mono_item() { - MonoItem::Static(def_id) => { - predefine_static(cx, def_id, linkage, visibility, &symbol_name); - } - MonoItem::Fn(instance) => { - predefine_fn(cx, instance, linkage, visibility, &symbol_name); - } - MonoItem::GlobalAsm(..) 
=> {} - } - - debug!("END PREDEFINING '{} ({})' in cgu {}", - self.to_string(cx.tcx), - self.to_raw_string(), - cx.codegen_unit.name()); - } - - fn to_raw_string(&self) -> String { - match *self.as_mono_item() { - MonoItem::Fn(instance) => { - format!("Fn({:?}, {})", - instance.def, - instance.substs.as_ptr() as usize) - } - MonoItem::Static(id) => { - format!("Static({:?})", id) - } - MonoItem::GlobalAsm(id) => { - format!("GlobalAsm({:?})", id) - } - } - } -} - -impl<'a, 'tcx> MonoItemExt<'a, 'tcx> for MonoItem<'tcx> {} - -fn predefine_static<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, - def_id: DefId, + fn predefine_fn(&self, + instance: Instance<'tcx>, linkage: Linkage, visibility: Visibility, symbol_name: &str) { - let instance = Instance::mono(cx.tcx, def_id); - let ty = instance.ty(cx.tcx); - let llty = cx.layout_of(ty).llvm_type(cx); + assert!(!instance.substs.needs_infer() && + !instance.substs.has_param_types()); - let g = declare::define_global(cx, symbol_name, llty).unwrap_or_else(|| { - cx.sess().span_fatal(cx.tcx.def_span(def_id), - &format!("symbol `{}` is already defined", symbol_name)) - }); - - unsafe { - llvm::LLVMRustSetLinkage(g, base::linkage_to_llvm(linkage)); - llvm::LLVMRustSetVisibility(g, base::visibility_to_llvm(visibility)); - } - - cx.instances.borrow_mut().insert(instance, g); -} - -fn predefine_fn<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, - instance: Instance<'tcx>, - linkage: Linkage, - visibility: Visibility, - symbol_name: &str) { - assert!(!instance.substs.needs_infer() && - !instance.substs.has_param_types()); - - let mono_ty = instance.ty(cx.tcx); - let attrs = cx.tcx.codegen_fn_attrs(instance.def_id()); - let lldecl = declare::declare_fn(cx, symbol_name, mono_ty); - unsafe { llvm::LLVMRustSetLinkage(lldecl, base::linkage_to_llvm(linkage)) }; - base::set_link_section(lldecl, &attrs); - if linkage == Linkage::LinkOnceODR || - linkage == Linkage::WeakODR { - llvm::SetUniqueComdat(cx.llmod, lldecl); - } - - // If we're compiling the compiler-builtins crate, e.g. the equivalent of - // compiler-rt, then we want to implicitly compile everything with hidden - // visibility as we're going to link this object all over the place but - // don't want the symbols to get exported. - if linkage != Linkage::Internal && linkage != Linkage::Private && - cx.tcx.is_compiler_builtins(LOCAL_CRATE) { - unsafe { - llvm::LLVMRustSetVisibility(lldecl, llvm::Visibility::Hidden); + let mono_sig = instance.fn_sig(self.tcx()); + let attrs = self.tcx.codegen_fn_attrs(instance.def_id()); + let lldecl = self.declare_fn(symbol_name, mono_sig); + unsafe { llvm::LLVMRustSetLinkage(lldecl, base::linkage_to_llvm(linkage)) }; + base::set_link_section(lldecl, &attrs); + if linkage == Linkage::LinkOnceODR || + linkage == Linkage::WeakODR { + llvm::SetUniqueComdat(self.llmod, lldecl); } - } else { - unsafe { - llvm::LLVMRustSetVisibility(lldecl, base::visibility_to_llvm(visibility)); + + // If we're compiling the compiler-builtins crate, e.g. the equivalent of + // compiler-rt, then we want to implicitly compile everything with hidden + // visibility as we're going to link this object all over the place but + // don't want the symbols to get exported. 
+ if linkage != Linkage::Internal && linkage != Linkage::Private && + self.tcx.is_compiler_builtins(LOCAL_CRATE) { + unsafe { + llvm::LLVMRustSetVisibility(lldecl, llvm::Visibility::Hidden); + } + } else { + unsafe { + llvm::LLVMRustSetVisibility(lldecl, base::visibility_to_llvm(visibility)); + } } - } - debug!("predefine_fn: mono_ty = {:?} instance = {:?}", mono_ty, instance); - if instance.def.is_inline(cx.tcx) { - attributes::inline(cx, lldecl, attributes::InlineAttr::Hint); - } - attributes::from_fn_attrs(cx, lldecl, Some(instance.def.def_id())); + debug!("predefine_fn: mono_sig = {:?} instance = {:?}", mono_sig, instance); + if instance.def.is_inline(self.tcx) { + attributes::inline(self, lldecl, attributes::InlineAttr::Hint); + } + attributes::from_fn_attrs(self, lldecl, Some(instance.def.def_id())); - cx.instances.borrow_mut().insert(instance, lldecl); + self.instances.borrow_mut().insert(instance, lldecl); + } } diff --git a/src/librustc_codegen_llvm/type_.rs b/src/librustc_codegen_llvm/type_.rs index 51a233d791..b100b67780 100644 --- a/src/librustc_codegen_llvm/type_.rs +++ b/src/librustc_codegen_llvm/type_.rs @@ -13,15 +13,23 @@ pub use llvm::Type; use llvm; -use llvm::{Bool, False, True, TypeKind}; - +use llvm::{Bool, False, True}; use context::CodegenCx; +use rustc_codegen_ssa::traits::*; +use value::Value; -use syntax::ast; -use rustc::ty::layout::{self, Align, Size}; +use rustc::util::nodemap::FxHashMap; +use rustc::ty::Ty; +use rustc::ty::layout::TyLayout; +use rustc_target::abi::call::{CastTarget, FnType, Reg}; use rustc_data_structures::small_c_str::SmallCStr; +use common; +use rustc_codegen_ssa::common::TypeKind; +use type_of::LayoutLlvmExt; +use abi::{LlvmType, FnTypeExt}; use std::fmt; +use std::cell::RefCell; use libc::c_uint; @@ -39,231 +47,184 @@ impl fmt::Debug for Type { } } -impl Type { - pub fn void(cx: &CodegenCx<'ll, '_>) -> &'ll Type { +impl CodegenCx<'ll, 'tcx> { + crate fn type_named_struct(&self, name: &str) -> &'ll Type { + let name = SmallCStr::new(name); unsafe { - llvm::LLVMVoidTypeInContext(cx.llcx) + llvm::LLVMStructCreateNamed(self.llcx, name.as_ptr()) } } - pub fn metadata(cx: &CodegenCx<'ll, '_>) -> &'ll Type { + crate fn set_struct_body(&self, ty: &'ll Type, els: &[&'ll Type], packed: bool) { unsafe { - llvm::LLVMRustMetadataTypeInContext(cx.llcx) + llvm::LLVMStructSetBody(ty, els.as_ptr(), + els.len() as c_uint, packed as Bool) } } +} - pub fn i1(cx: &CodegenCx<'ll, '_>) -> &'ll Type { +impl BaseTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> { + fn type_void(&self) -> &'ll Type { unsafe { - llvm::LLVMInt1TypeInContext(cx.llcx) + llvm::LLVMVoidTypeInContext(self.llcx) } } - pub fn i8(cx: &CodegenCx<'ll, '_>) -> &'ll Type { + fn type_metadata(&self) -> &'ll Type { unsafe { - llvm::LLVMInt8TypeInContext(cx.llcx) + llvm::LLVMRustMetadataTypeInContext(self.llcx) } } - pub fn i8_llcx(llcx: &llvm::Context) -> &Type { + fn type_i1(&self) -> &'ll Type { unsafe { - llvm::LLVMInt8TypeInContext(llcx) + llvm::LLVMInt1TypeInContext(self.llcx) } } - pub fn i16(cx: &CodegenCx<'ll, '_>) -> &'ll Type { + fn type_i8(&self) -> &'ll Type { unsafe { - llvm::LLVMInt16TypeInContext(cx.llcx) + llvm::LLVMInt8TypeInContext(self.llcx) } } - pub fn i32(cx: &CodegenCx<'ll, '_>) -> &'ll Type { + + fn type_i16(&self) -> &'ll Type { unsafe { - llvm::LLVMInt32TypeInContext(cx.llcx) + + llvm::LLVMInt16TypeInContext(self.llcx) } } - pub fn i64(cx: &CodegenCx<'ll, '_>) -> &'ll Type { + fn type_i32(&self) -> &'ll Type { unsafe { - llvm::LLVMInt64TypeInContext(cx.llcx) + 
llvm::LLVMInt32TypeInContext(self.llcx) } } - pub fn i128(cx: &CodegenCx<'ll, '_>) -> &'ll Type { + fn type_i64(&self) -> &'ll Type { unsafe { - llvm::LLVMIntTypeInContext(cx.llcx, 128) + llvm::LLVMInt64TypeInContext(self.llcx) } } - // Creates an integer type with the given number of bits, e.g. i24 - pub fn ix(cx: &CodegenCx<'ll, '_>, num_bits: u64) -> &'ll Type { + fn type_i128(&self) -> &'ll Type { unsafe { - llvm::LLVMIntTypeInContext(cx.llcx, num_bits as c_uint) + llvm::LLVMIntTypeInContext(self.llcx, 128) } } - // Creates an integer type with the given number of bits, e.g. i24 - pub fn ix_llcx(llcx: &llvm::Context, num_bits: u64) -> &Type { + fn type_ix(&self, num_bits: u64) -> &'ll Type { unsafe { - llvm::LLVMIntTypeInContext(llcx, num_bits as c_uint) + llvm::LLVMIntTypeInContext(self.llcx, num_bits as c_uint) } } - pub fn f32(cx: &CodegenCx<'ll, '_>) -> &'ll Type { + fn type_isize(&self) -> &'ll Type { + self.isize_ty + } + + fn type_f32(&self) -> &'ll Type { unsafe { - llvm::LLVMFloatTypeInContext(cx.llcx) + llvm::LLVMFloatTypeInContext(self.llcx) } } - pub fn f64(cx: &CodegenCx<'ll, '_>) -> &'ll Type { + fn type_f64(&self) -> &'ll Type { unsafe { - llvm::LLVMDoubleTypeInContext(cx.llcx) + llvm::LLVMDoubleTypeInContext(self.llcx) } } - pub fn bool(cx: &CodegenCx<'ll, '_>) -> &'ll Type { - Type::i8(cx) - } - - pub fn char(cx: &CodegenCx<'ll, '_>) -> &'ll Type { - Type::i32(cx) - } - - pub fn i8p(cx: &CodegenCx<'ll, '_>) -> &'ll Type { - Type::i8(cx).ptr_to() - } - - pub fn i8p_llcx(llcx: &llvm::Context) -> &Type { - Type::i8_llcx(llcx).ptr_to() - } - - pub fn isize(cx: &CodegenCx<'ll, '_>) -> &'ll Type { - cx.isize_ty - } - - pub fn c_int(cx: &CodegenCx<'ll, '_>) -> &'ll Type { - match &cx.tcx.sess.target.target.target_c_int_width[..] { - "16" => Type::i16(cx), - "32" => Type::i32(cx), - "64" => Type::i64(cx), - width => bug!("Unsupported target_c_int_width: {}", width), + fn type_x86_mmx(&self) -> &'ll Type { + unsafe { + llvm::LLVMX86MMXTypeInContext(self.llcx) } } - pub fn int_from_ty(cx: &CodegenCx<'ll, '_>, t: ast::IntTy) -> &'ll Type { - match t { - ast::IntTy::Isize => cx.isize_ty, - ast::IntTy::I8 => Type::i8(cx), - ast::IntTy::I16 => Type::i16(cx), - ast::IntTy::I32 => Type::i32(cx), - ast::IntTy::I64 => Type::i64(cx), - ast::IntTy::I128 => Type::i128(cx), - } - } - - pub fn uint_from_ty(cx: &CodegenCx<'ll, '_>, t: ast::UintTy) -> &'ll Type { - match t { - ast::UintTy::Usize => cx.isize_ty, - ast::UintTy::U8 => Type::i8(cx), - ast::UintTy::U16 => Type::i16(cx), - ast::UintTy::U32 => Type::i32(cx), - ast::UintTy::U64 => Type::i64(cx), - ast::UintTy::U128 => Type::i128(cx), - } - } - - pub fn float_from_ty(cx: &CodegenCx<'ll, '_>, t: ast::FloatTy) -> &'ll Type { - match t { - ast::FloatTy::F32 => Type::f32(cx), - ast::FloatTy::F64 => Type::f64(cx), - } - } - - pub fn func(args: &[&'ll Type], ret: &'ll Type) -> &'ll Type { + fn type_func( + &self, + args: &[&'ll Type], + ret: &'ll Type + ) -> &'ll Type { unsafe { llvm::LLVMFunctionType(ret, args.as_ptr(), args.len() as c_uint, False) } } - pub fn variadic_func(args: &[&'ll Type], ret: &'ll Type) -> &'ll Type { + fn type_variadic_func( + &self, + args: &[&'ll Type], + ret: &'ll Type + ) -> &'ll Type { unsafe { llvm::LLVMFunctionType(ret, args.as_ptr(), args.len() as c_uint, True) } } - pub fn struct_(cx: &CodegenCx<'ll, '_>, els: &[&'ll Type], packed: bool) -> &'ll Type { + fn type_struct( + &self, + els: &[&'ll Type], + packed: bool + ) -> &'ll Type { unsafe { - llvm::LLVMStructTypeInContext(cx.llcx, els.as_ptr(), + 
llvm::LLVMStructTypeInContext(self.llcx, els.as_ptr(), els.len() as c_uint, packed as Bool) } } - pub fn named_struct(cx: &CodegenCx<'ll, '_>, name: &str) -> &'ll Type { - let name = SmallCStr::new(name); - unsafe { - llvm::LLVMStructCreateNamed(cx.llcx, name.as_ptr()) - } - } - - pub fn array(ty: &Type, len: u64) -> &Type { + fn type_array(&self, ty: &'ll Type, len: u64) -> &'ll Type { unsafe { llvm::LLVMRustArrayType(ty, len) } } - pub fn vector(ty: &Type, len: u64) -> &Type { + fn type_vector(&self, ty: &'ll Type, len: u64) -> &'ll Type { unsafe { llvm::LLVMVectorType(ty, len as c_uint) } } - pub fn kind(&self) -> TypeKind { + fn type_kind(&self, ty: &'ll Type) -> TypeKind { unsafe { - llvm::LLVMRustGetTypeKind(self) + llvm::LLVMRustGetTypeKind(ty).to_generic() } } - pub fn set_struct_body(&'ll self, els: &[&'ll Type], packed: bool) { + fn type_ptr_to(&self, ty: &'ll Type) -> &'ll Type { + assert_ne!(self.type_kind(ty), TypeKind::Function, + "don't call ptr_to on function types, use ptr_to_llvm_type on FnType instead"); + ty.ptr_to() + } + + fn element_type(&self, ty: &'ll Type) -> &'ll Type { unsafe { - llvm::LLVMStructSetBody(self, els.as_ptr(), - els.len() as c_uint, packed as Bool) + llvm::LLVMGetElementType(ty) } } - pub fn ptr_to(&self) -> &Type { + fn vector_length(&self, ty: &'ll Type) -> usize { unsafe { - llvm::LLVMPointerType(self, 0) + llvm::LLVMGetVectorSize(ty) as usize } } - pub fn element_type(&self) -> &Type { + fn func_params_types(&self, ty: &'ll Type) -> Vec<&'ll Type> { unsafe { - llvm::LLVMGetElementType(self) - } - } - - /// Return the number of elements in `self` if it is a LLVM vector type. - pub fn vector_length(&self) -> usize { - unsafe { - llvm::LLVMGetVectorSize(self) as usize - } - } - - pub fn func_params(&self) -> Vec<&Type> { - unsafe { - let n_args = llvm::LLVMCountParamTypes(self) as usize; + let n_args = llvm::LLVMCountParamTypes(ty) as usize; let mut args = Vec::with_capacity(n_args); - llvm::LLVMGetParamTypes(self, args.as_mut_ptr()); + llvm::LLVMGetParamTypes(ty, args.as_mut_ptr()); args.set_len(n_args); args } } - pub fn float_width(&self) -> usize { - match self.kind() { + fn float_width(&self, ty: &'ll Type) -> usize { + match self.type_kind(ty) { TypeKind::Float => 32, TypeKind::Double => 64, TypeKind::X86_FP80 => 80, @@ -272,45 +233,84 @@ impl Type { } } - /// Retrieve the bit width of the integer type `self`. - pub fn int_width(&self) -> u64 { + fn int_width(&self, ty: &'ll Type) -> u64 { unsafe { - llvm::LLVMGetIntTypeWidth(self) as u64 + llvm::LLVMGetIntTypeWidth(ty) as u64 } } - pub fn from_integer(cx: &CodegenCx<'ll, '_>, i: layout::Integer) -> &'ll Type { - use rustc::ty::layout::Integer::*; - match i { - I8 => Type::i8(cx), - I16 => Type::i16(cx), - I32 => Type::i32(cx), - I64 => Type::i64(cx), - I128 => Type::i128(cx), + fn val_ty(&self, v: &'ll Value) -> &'ll Type { + common::val_ty(v) + } + + fn scalar_lltypes(&self) -> &RefCell, Self::Type>> { + &self.scalar_lltypes + } +} + +impl Type { + pub fn i8_llcx(llcx: &llvm::Context) -> &Type { + unsafe { + llvm::LLVMInt8TypeInContext(llcx) } } - /// Return a LLVM type that has at most the required alignment, - /// as a conservative approximation for unknown pointee types. - pub fn pointee_for_abi_align(cx: &CodegenCx<'ll, '_>, align: Align) -> &'ll Type { - // FIXME(eddyb) We could find a better approximation if ity.align < align. 
- let ity = layout::Integer::approximate_abi_align(cx, align); - Type::from_integer(cx, ity) - } - - /// Return a LLVM type that has at most the required alignment, - /// and exactly the required size, as a best-effort padding array. - pub fn padding_filler(cx: &CodegenCx<'ll, '_>, size: Size, align: Align) -> &'ll Type { - let unit = layout::Integer::approximate_abi_align(cx, align); - let size = size.bytes(); - let unit_size = unit.size().bytes(); - assert_eq!(size % unit_size, 0); - Type::array(Type::from_integer(cx, unit), size / unit_size) - } - - pub fn x86_mmx(cx: &CodegenCx<'ll, '_>) -> &'ll Type { + // Creates an integer type with the given number of bits, e.g. i24 + pub fn ix_llcx( + llcx: &llvm::Context, + num_bits: u64 + ) -> &Type { unsafe { - llvm::LLVMX86MMXTypeInContext(cx.llcx) + llvm::LLVMIntTypeInContext(llcx, num_bits as c_uint) + } + } + + pub fn i8p_llcx(llcx: &'ll llvm::Context) -> &'ll Type { + Type::i8_llcx(llcx).ptr_to() + } + + fn ptr_to(&self) -> &Type { + unsafe { + llvm::LLVMPointerType(&self, 0) } } } + + +impl LayoutTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> { + fn backend_type(&self, layout: TyLayout<'tcx>) -> &'ll Type { + layout.llvm_type(self) + } + fn immediate_backend_type(&self, layout: TyLayout<'tcx>) -> &'ll Type { + layout.immediate_llvm_type(self) + } + fn is_backend_immediate(&self, layout: TyLayout<'tcx>) -> bool { + layout.is_llvm_immediate() + } + fn is_backend_scalar_pair(&self, layout: TyLayout<'tcx>) -> bool { + layout.is_llvm_scalar_pair() + } + fn backend_field_index(&self, layout: TyLayout<'tcx>, index: usize) -> u64 { + layout.llvm_field_index(index) + } + fn scalar_pair_element_backend_type<'a>( + &self, + layout: TyLayout<'tcx>, + index: usize, + immediate: bool + ) -> &'ll Type { + layout.scalar_pair_element_llvm_type(self, index, immediate) + } + fn cast_backend_type(&self, ty: &CastTarget) -> &'ll Type { + ty.llvm_type(self) + } + fn fn_backend_type(&self, ty: &FnType<'tcx, Ty<'tcx>>) -> &'ll Type { + ty.llvm_type(self) + } + fn fn_ptr_backend_type(&self, ty: &FnType<'tcx, Ty<'tcx>>) -> &'ll Type { + ty.ptr_to_llvm_type(self) + } + fn reg_backend_type(&self, ty: &Reg) -> &'ll Type { + ty.llvm_type(self) + } +} diff --git a/src/librustc_codegen_llvm/type_of.rs b/src/librustc_codegen_llvm/type_of.rs index 03ded64e64..15b5bdeb44 100644 --- a/src/librustc_codegen_llvm/type_of.rs +++ b/src/librustc_codegen_llvm/type_of.rs @@ -16,6 +16,7 @@ use rustc::ty::layout::{self, Align, LayoutOf, Size, TyLayout}; use rustc_target::abi::FloatTy; use rustc_mir::monomorphize::item::DefPathBasedNames; use type_::Type; +use rustc_codegen_ssa::traits::*; use std::fmt::Write; @@ -37,14 +38,14 @@ fn uncached_llvm_type<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, (cx.sess().target.target.arch == "x86" || cx.sess().target.target.arch == "x86_64"); if use_x86_mmx { - return Type::x86_mmx(cx) + return cx.type_x86_mmx() } else { let element = layout.scalar_llvm_type_at(cx, element, Size::ZERO); - return Type::vector(element, count); + return cx.type_vector(element, count); } } layout::Abi::ScalarPair(..) 
=> { - return Type::struct_(cx, &[ + return cx.type_struct( &[ layout.scalar_pair_element_llvm_type(cx, 0, false), layout.scalar_pair_element_llvm_type(cx, 1, false), ], false); @@ -79,30 +80,30 @@ fn uncached_llvm_type<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, match layout.fields { layout::FieldPlacement::Union(_) => { - let fill = Type::padding_filler(cx, layout.size, layout.align); + let fill = cx.type_padding_filler(layout.size, layout.align.abi); let packed = false; match name { None => { - Type::struct_(cx, &[fill], packed) + cx.type_struct( &[fill], packed) } Some(ref name) => { - let llty = Type::named_struct(cx, name); - llty.set_struct_body(&[fill], packed); + let llty = cx.type_named_struct( name); + cx.set_struct_body(llty, &[fill], packed); llty } } } layout::FieldPlacement::Array { count, .. } => { - Type::array(layout.field(cx, 0).llvm_type(cx), count) + cx.type_array(layout.field(cx, 0).llvm_type(cx), count) } layout::FieldPlacement::Arbitrary { .. } => { match name { None => { let (llfields, packed) = struct_llfields(cx, layout); - Type::struct_(cx, &llfields, packed) + cx.type_struct( &llfields, packed) } Some(ref name) => { - let llty = Type::named_struct(cx, name); + let llty = cx.type_named_struct( name); *defer = Some((llty, layout)); llty } @@ -119,24 +120,24 @@ fn struct_llfields<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, let mut packed = false; let mut offset = Size::ZERO; - let mut prev_effective_align = layout.align; + let mut prev_effective_align = layout.align.abi; let mut result: Vec<_> = Vec::with_capacity(1 + field_count * 2); for i in layout.fields.index_by_increasing_offset() { let target_offset = layout.fields.offset(i as usize); let field = layout.field(cx, i); - let effective_field_align = layout.align - .min(field.align) + let effective_field_align = layout.align.abi + .min(field.align.abi) .restrict_for_offset(target_offset); - packed |= effective_field_align.abi() < field.align.abi(); + packed |= effective_field_align < field.align.abi; debug!("struct_llfields: {}: {:?} offset: {:?} target_offset: {:?} \ effective_field_align: {}", - i, field, offset, target_offset, effective_field_align.abi()); + i, field, offset, target_offset, effective_field_align.bytes()); assert!(target_offset >= offset); let padding = target_offset - offset; let padding_align = prev_effective_align.min(effective_field_align); - assert_eq!(offset.abi_align(padding_align) + padding, target_offset); - result.push(Type::padding_filler(cx, padding, padding_align)); + assert_eq!(offset.align_to(padding_align) + padding, target_offset); + result.push(cx.type_padding_filler( padding, padding_align)); debug!(" padding before: {:?}", padding); result.push(field.llvm_type(cx)); @@ -150,10 +151,10 @@ fn struct_llfields<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, } let padding = layout.size - offset; let padding_align = prev_effective_align; - assert_eq!(offset.abi_align(padding_align) + padding, layout.size); + assert_eq!(offset.align_to(padding_align) + padding, layout.size); debug!("struct_llfields: pad_bytes: {:?} offset: {:?} stride: {:?}", padding, offset, layout.size); - result.push(Type::padding_filler(cx, padding, padding_align)); + result.push(cx.type_padding_filler(padding, padding_align)); assert_eq!(result.len(), 1 + field_count * 2); } else { debug!("struct_llfields: offset: {:?} stride: {:?}", @@ -165,7 +166,7 @@ fn struct_llfields<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, impl<'a, 'tcx> CodegenCx<'a, 'tcx> { pub fn align_of(&self, ty: Ty<'tcx>) -> Align { - self.layout_of(ty).align + 
self.layout_of(ty).align.abi } pub fn size_of(&self, ty: Ty<'tcx>) -> Size { @@ -173,7 +174,8 @@ impl<'a, 'tcx> CodegenCx<'a, 'tcx> { } pub fn size_and_align_of(&self, ty: Ty<'tcx>) -> (Size, Align) { - self.layout_of(ty).size_and_align() + let layout = self.layout_of(ty); + (layout.size, layout.align.abi) } } @@ -255,17 +257,17 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { let llty = match self.ty.sty { ty::Ref(_, ty, _) | ty::RawPtr(ty::TypeAndMut { ty, .. }) => { - cx.layout_of(ty).llvm_type(cx).ptr_to() + cx.type_ptr_to(cx.layout_of(ty).llvm_type(cx)) } ty::Adt(def, _) if def.is_box() => { - cx.layout_of(self.ty.boxed_ty()).llvm_type(cx).ptr_to() + cx.type_ptr_to(cx.layout_of(self.ty.boxed_ty()).llvm_type(cx)) } ty::FnPtr(sig) => { let sig = cx.tcx.normalize_erasing_late_bound_regions( ty::ParamEnv::reveal_all(), &sig, ); - FnType::new(cx, sig, &[]).llvm_type(cx).ptr_to() + cx.fn_ptr_backend_type(&FnType::new(cx, sig, &[])) } _ => self.scalar_llvm_type_at(cx, scalar, Size::ZERO) }; @@ -285,7 +287,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { debug!("llvm_type({:#?})", self); - assert!(!self.ty.has_escaping_regions(), "{:?} has escaping regions", self.ty); + assert!(!self.ty.has_escaping_bound_vars(), "{:?} has escaping bound vars", self.ty); // Make sure lifetimes are erased, to avoid generating distinct LLVM // types for Rust types that only differ in the choice of lifetimes. @@ -307,7 +309,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { if let Some((llty, layout)) = defer { let (llfields, packed) = struct_llfields(cx, layout); - llty.set_struct_body(&llfields, packed) + cx.set_struct_body(llty, &llfields, packed) } llty @@ -316,7 +318,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type { if let layout::Abi::Scalar(ref scalar) = self.abi { if scalar.is_bool() { - return Type::i1(cx); + return cx.type_i1(); } } self.llvm_type(cx) @@ -325,17 +327,17 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { fn scalar_llvm_type_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, scalar: &layout::Scalar, offset: Size) -> &'a Type { match scalar.value { - layout::Int(i, _) => Type::from_integer(cx, i), - layout::Float(FloatTy::F32) => Type::f32(cx), - layout::Float(FloatTy::F64) => Type::f64(cx), + layout::Int(i, _) => cx.type_from_integer( i), + layout::Float(FloatTy::F32) => cx.type_f32(), + layout::Float(FloatTy::F64) => cx.type_f64(), layout::Pointer => { // If we know the alignment, pick something better than i8. let pointee = if let Some(pointee) = self.pointee_info_at(cx, offset) { - Type::pointee_for_abi_align(cx, pointee.align) + cx.type_pointee_for_align(pointee.align) } else { - Type::i8(cx) + cx.type_i8() }; - pointee.ptr_to() + cx.type_ptr_to(pointee) } } } @@ -369,13 +371,13 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { // when immediate. We need to load/store `bool` as `i8` to avoid // crippling LLVM optimizations or triggering other LLVM bugs with `i1`. 
if immediate && scalar.is_bool() { - return Type::i1(cx); + return cx.type_i1(); } let offset = if index == 0 { Size::ZERO } else { - a.value.size(cx).abi_align(b.value.align(cx)) + a.value.size(cx).align_to(b.value.align(cx).abi) }; self.scalar_llvm_type_at(cx, scalar, offset) } diff --git a/src/librustc_codegen_llvm/va_arg.rs b/src/librustc_codegen_llvm/va_arg.rs new file mode 100644 index 0000000000..fbc3e6f06d --- /dev/null +++ b/src/librustc_codegen_llvm/va_arg.rs @@ -0,0 +1,142 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use builder::Builder; +use rustc_codegen_ssa::mir::operand::OperandRef; +use rustc_codegen_ssa::traits::{BaseTypeMethods, BuilderMethods, ConstMethods, DerivedTypeMethods}; +use rustc::ty::layout::{Align, HasDataLayout, HasTyCtxt, LayoutOf, Size}; +use rustc::ty::Ty; +use type_::Type; +use type_of::LayoutLlvmExt; +use value::Value; + +#[allow(dead_code)] +fn round_pointer_up_to_alignment( + bx: &mut Builder<'a, 'll, 'tcx>, + addr: &'ll Value, + align: Align, + ptr_ty: &'ll Type +) -> &'ll Value { + let mut ptr_as_int = bx.ptrtoint(addr, bx.cx().type_isize()); + ptr_as_int = bx.add(ptr_as_int, bx.cx().const_i32(align.bytes() as i32 - 1)); + ptr_as_int = bx.and(ptr_as_int, bx.cx().const_i32(-(align.bytes() as i32))); + bx.inttoptr(ptr_as_int, ptr_ty) +} + +fn emit_direct_ptr_va_arg( + bx: &mut Builder<'a, 'll, 'tcx>, + list: OperandRef<'tcx, &'ll Value>, + llty: &'ll Type, + size: Size, + align: Align, + slot_size: Align, + allow_higher_align: bool +) -> (&'ll Value, Align) { + let va_list_ptr_ty = bx.cx().type_ptr_to(bx.cx.type_i8p()); + let va_list_addr = if list.layout.llvm_type(bx.cx) != va_list_ptr_ty { + bx.bitcast(list.immediate(), va_list_ptr_ty) + } else { + list.immediate() + }; + + let ptr = bx.load(va_list_addr, bx.tcx().data_layout.pointer_align.abi); + + let (addr, addr_align) = if allow_higher_align && align > slot_size { + (round_pointer_up_to_alignment(bx, ptr, align, bx.cx().type_i8p()), align) + } else { + (ptr, slot_size) + }; + + + let aligned_size = size.align_to(slot_size).bytes() as i32; + let full_direct_size = bx.cx().const_i32(aligned_size); + let next = bx.inbounds_gep(addr, &[full_direct_size]); + bx.store(next, va_list_addr, bx.tcx().data_layout.pointer_align.abi); + + if size.bytes() < slot_size.bytes() && + &*bx.tcx().sess.target.target.target_endian == "big" { + let adjusted_size = bx.cx().const_i32((slot_size.bytes() - size.bytes()) as i32); + let adjusted = bx.inbounds_gep(addr, &[adjusted_size]); + (bx.bitcast(adjusted, bx.cx().type_ptr_to(llty)), addr_align) + } else { + (bx.bitcast(addr, bx.cx().type_ptr_to(llty)), addr_align) + } +} + +fn emit_ptr_va_arg( + bx: &mut Builder<'a, 'll, 'tcx>, + list: OperandRef<'tcx, &'ll Value>, + target_ty: Ty<'tcx>, + indirect: bool, + slot_size: Align, + allow_higher_align: bool +) -> &'ll Value { + let layout = bx.cx.layout_of(target_ty); + let (llty, size, align) = if indirect { + (bx.cx.layout_of(bx.cx.tcx.mk_imm_ptr(target_ty)).llvm_type(bx.cx), + bx.cx.data_layout().pointer_size, + bx.cx.data_layout().pointer_align) + } else { + (layout.llvm_type(bx.cx), + layout.size, + layout.align) + }; + let (addr, addr_align) = emit_direct_ptr_va_arg(bx, list, llty, size, align.abi, + 
slot_size, allow_higher_align); + if indirect { + let tmp_ret = bx.load(addr, addr_align); + bx.load(tmp_ret, align.abi) + } else { + bx.load(addr, addr_align) + } +} + +pub(super) fn emit_va_arg( + bx: &mut Builder<'a, 'll, 'tcx>, + addr: OperandRef<'tcx, &'ll Value>, + target_ty: Ty<'tcx>, +) -> &'ll Value { + // Determine the va_arg implementation to use. The LLVM va_arg instruction + // is lacking in some instances, so we should only use it as a fallback. + let arch = &bx.cx.tcx.sess.target.target.arch; + match (&**arch, + bx.cx.tcx.sess.target.target.options.is_like_windows) { + ("x86", true) => { + emit_ptr_va_arg(bx, addr, target_ty, false, + Align::from_bytes(4).unwrap(), false) + } + ("x86_64", true) => { + let target_ty_size = bx.cx.size_of(target_ty).bytes(); + let indirect = if target_ty_size > 8 || !target_ty_size.is_power_of_two() { + true + } else { + false + }; + emit_ptr_va_arg(bx, addr, target_ty, indirect, + Align::from_bytes(8).unwrap(), false) + } + ("x86", false) => { + emit_ptr_va_arg(bx, addr, target_ty, false, + Align::from_bytes(4).unwrap(), true) + } + _ => { + let va_list = if (bx.tcx().sess.target.target.arch == "aarch64" || + bx.tcx().sess.target.target.arch == "x86_64" || + bx.tcx().sess.target.target.arch == "powerpc") && + !bx.tcx().sess.target.target.options.is_like_windows { + bx.load(addr.immediate(), bx.tcx().data_layout.pointer_align.abi) + } else { + addr.immediate() + }; + bx.va_arg(va_list, bx.cx.layout_of(target_ty).llvm_type(bx.cx)) + } + } +} + diff --git a/src/librustc_codegen_ssa/Cargo.toml b/src/librustc_codegen_ssa/Cargo.toml new file mode 100644 index 0000000000..7b1c7cfb56 --- /dev/null +++ b/src/librustc_codegen_ssa/Cargo.toml @@ -0,0 +1,34 @@ +[package] +authors = ["The Rust Project Developers"] +name = "rustc_codegen_ssa" +version = "0.0.0" + +[lib] +name = "rustc_codegen_ssa" +path = "lib.rs" +crate-type = ["dylib"] +test = false + +[dependencies] +bitflags = "1.0.4" +cc = "1.0.1" +num_cpus = "1.0" +rustc-demangle = "0.1.4" +memmap = "0.6" +log = "0.4.5" +libc = "0.2.43" +jobserver = "0.1.11" + +serialize = { path = "../libserialize" } +syntax = { path = "../libsyntax" } +syntax_pos = { path = "../libsyntax_pos" } +rustc = { path = "../librustc" } +rustc_allocator = { path = "../librustc_allocator" } +rustc_apfloat = { path = "../librustc_apfloat" } +rustc_codegen_utils = { path = "../librustc_codegen_utils" } +rustc_data_structures = { path = "../librustc_data_structures"} +rustc_errors = { path = "../librustc_errors" } +rustc_fs_util = { path = "../librustc_fs_util" } +rustc_incremental = { path = "../librustc_incremental" } +rustc_mir = { path = "../librustc_mir" } +rustc_target = { path = "../librustc_target" } diff --git a/src/librustc_codegen_ssa/README.md b/src/librustc_codegen_ssa/README.md new file mode 100644 index 0000000000..9e1d429180 --- /dev/null +++ b/src/librustc_codegen_ssa/README.md @@ -0,0 +1,121 @@ +# Refactoring of `rustc_codegen_llvm` +by Denis Merigoux, October 23rd 2018 + +## State of the code before the refactoring + +All the code related to the compilation of MIR into LLVM IR was contained inside the `rustc_codegen_llvm` crate. 
Here is the breakdown of the most important elements: +* the `back` folder (7,800 LOC) implements the mechanisms for creating the different object files and archives through LLVM, but also the communication mechanisms for parallel code generation; +* the `debuginfo` (3,200 LOC) folder contains all code that passes debug information down to LLVM; +* the `llvm` (2,200 LOC) folder defines the FFI necessary to communicate with LLVM using the C++ API; +* the `mir` (4,300 LOC) folder implements the actual lowering from MIR to LLVM IR; +* the `base.rs` (1,300 LOC) file contains some helper functions but also the high-level code that launches the code generation and distributes the work; +* the `builder.rs` (1,200 LOC) file contains all the functions generating individual LLVM IR instructions inside a basic block; +* the `common.rs` (450 LOC) file contains various helper functions and all the functions generating LLVM static values; +* the `type_.rs` (300 LOC) file defines most of the type translations to LLVM IR. + +The goal of this refactoring is to separate, inside this crate, the code that is specific to LLVM from the code that can be reused for other rustc backends. For instance, the `mir` folder is almost entirely backend-agnostic, but it relies heavily on other parts of the crate. The separation of the code must not affect its logic or its performance. + +For these reasons, the separation process involves two transformations that have to be done at the same time for the resulting code to compile: + +1. replace all the LLVM-specific types by generics inside function signatures and structure definitions; +2. encapsulate all functions calling the LLVM FFI inside a set of traits that will define the interface between backend-agnostic code and the backend. + +While the LLVM-specific code will be left in `rustc_codegen_llvm`, all the new traits and backend-agnostic code will be moved into `rustc_codegen_ssa` (name suggestion by @eddyb). + +## Generic types and structures + +@irinagpopa started to parametrize the types of `rustc_codegen_llvm` by a generic `Value` type, implemented in LLVM by a reference `&'ll Value`. This work has been extended to all structures inside the `mir` folder and elsewhere, as well as to LLVM's `BasicBlock` and `Type` types. + +The two most important structures for the LLVM codegen are `CodegenCx` and `Builder`. They are parametrized by multiple lifetime parameters and by the type used for `Value`. + +```rust +struct CodegenCx<'ll, 'tcx: 'll> { + /* ... */ +} + +struct Builder<'a, 'll: 'a, 'tcx: 'll> { + cx: &'a CodegenCx<'ll, 'tcx>, + /* ... */ +} +``` + +`CodegenCx` is used to compile one codegen-unit that can contain multiple functions, whereas `Builder` is created to compile one basic block. + +The code in `rustc_codegen_llvm` has to deal with multiple explicit lifetime parameters, which correspond to the following: +* `'tcx` is the longest lifetime, corresponding to the original `TyCtxt` containing the program's information; +* `'a` is the lifetime of a short-lived reference to a `CodegenCx` or another object inside a struct; +* `'ll` is the lifetime of references to LLVM objects such as `Value` or `Type`. + +Although there are already many lifetime parameters in the code, making it generic uncovered situations where the borrow checker had been accepting the code only because of the special nature of the LLVM objects being manipulated (they are extern pointers). For instance, an additional lifetime parameter had to be added to `LocalAnalyzer` in `analyze.rs`, leading to the definition: + +```rust +struct LocalAnalyzer<'mir, 'a: 'mir, 'tcx: 'a> { + /* ... */ +} +``` + +However, the two most important structures, `CodegenCx` and `Builder`, are not defined in the backend-agnostic code. Indeed, their content is highly specific to the backend, and it makes more sense to leave their definition to the backend implementor than to expose just a narrow slot for the backend's context via a generic field.
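As a recap of transformation (1) above, here is a minimal, hedged sketch of what removing a concrete LLVM type from a signature looks like; the helper `pair_operands` is invented for this example and does not appear in the patch, and the trait machinery that makes this useful at scale is the subject of the next section.

```rust
// Before: the helper can only live in `rustc_codegen_llvm`, because its
// signature names LLVM's value type directly:
//
//     fn pair_operands<'ll>(a: &'ll Value, b: &'ll Value) -> (&'ll Value, &'ll Value)
//
// After: the same helper is generic over an abstract value type `V`, so it can
// move to backend-agnostic code; the LLVM backend instantiates `V = &'ll Value`.
fn pair_operands<V: Copy>(a: V, b: V) -> (V, V) {
    (a, b)
}

fn main() {
    // Exercise the generic helper with plain integers standing in for values.
    assert_eq!(pair_operands(1, 2), (1, 2));
}
```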
## Traits and interface + +Because they have to be defined by the backend, `CodegenCx` and `Builder` will be the structures implementing all the traits defining the backend's interface. These traits are defined in the folder `rustc_codegen_ssa/traits`, and all the backend-agnostic code is parametrized by them. For instance, let us explain how a function in `base.rs` is parametrized: + +```rust +pub fn codegen_instance<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( + cx: &'a Bx::CodegenCx, + instance: Instance<'tcx> +) { + /* ... */ +} +``` + +In this signature, we have the two lifetime parameters explained earlier and the master type `Bx`, which satisfies the `BuilderMethods` trait corresponding to the interface provided by the `Builder` struct. `BuilderMethods` defines an associated type `Bx::CodegenCx` that itself satisfies the `CodegenMethods` trait, implemented by the `CodegenCx` struct. + +On the trait side, here is an example with part of the definition of `BuilderMethods` in `traits/builder.rs`: + +```rust +pub trait BuilderMethods<'a, 'tcx: 'a>: + HasCodegen<'tcx> + + DebugInfoBuilderMethods<'tcx> + + ArgTypeMethods<'tcx> + + AbiBuilderMethods<'tcx> + + IntrinsicCallMethods<'tcx> + + AsmBuilderMethods<'tcx> +{ + fn new_block<'b>( + cx: &'a Self::CodegenCx, + llfn: Self::Value, + name: &'b str + ) -> Self; + /* ... */ + fn cond_br( + &mut self, + cond: Self::Value, + then_llbb: Self::BasicBlock, + else_llbb: Self::BasicBlock, + ); + /* ... */ +} +``` + +Finally, a master structure implementing the `ExtraBackendMethods` trait is used for high-level codegen-driving functions like `codegen_crate` in `base.rs`. For LLVM, it is the empty `LlvmCodegenBackend`. `ExtraBackendMethods` should be implemented by the same structure that implements the `CodegenBackend` defined in `rustc_codegen_utils/codegen_backend.rs`. + +During the traitification process, certain functions have been converted from methods of a local structure to methods of `CodegenCx` or `Builder`, and a corresponding `self` parameter has been added. Indeed, LLVM stores information internally that it can access when called through its API. This information does not show up in a Rust data structure carried around when these methods are called. However, when implementing a Rust backend for `rustc`, these methods will need information from `CodegenCx`, hence the additional parameter (unused in the LLVM implementation of the trait).
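To see how these pieces fit together outside the compiler, here is a deliberately tiny, self-contained model of the same pattern. Everything in it (`MiniBuilderMethods`, `ConstFolder`, `codegen_double`) is invented for illustration only; the real traits in `rustc_codegen_ssa/traits` are much richer and also carry the lifetime parameters discussed above.

```rust
// A miniature stand-in for the backend interface: one associated `Value` type
// plus a single instruction-building method.
trait MiniBuilderMethods {
    type Value: Copy;
    fn add(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
}

// "Backend-agnostic" code: it never names a concrete value type, only `Bx::Value`.
fn codegen_double<Bx: MiniBuilderMethods>(bx: &mut Bx, v: Bx::Value) -> Bx::Value {
    bx.add(v, v)
}

// A toy backend whose values are plain integers; in the LLVM implementation of
// the real traits, `Self::Value` is `&'ll Value` and the methods call the FFI.
struct ConstFolder;

impl MiniBuilderMethods for ConstFolder {
    type Value = i64;
    fn add(&mut self, lhs: i64, rhs: i64) -> i64 {
        lhs + rhs
    }
}

fn main() {
    let mut bx = ConstFolder;
    assert_eq!(codegen_double(&mut bx, 21), 42);
}
```

The abstraction relies only on compile-time parametricity: `codegen_double` is monomorphized for each backend, so no dynamic dispatch is introduced.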
Here is the new LOC breakdown between backend-agnostic (BA) and LLVM for the most important elements:
+
+* `back` folder: 3,800 (BA) vs 4,100 (LLVM);
+* `mir` folder: 4,400 (BA) vs 0 (LLVM);
+* `base.rs`: 1,100 (BA) vs 250 (LLVM);
+* `builder.rs`: 1,400 (BA) vs 0 (LLVM);
+* `common.rs`: 350 (BA) vs 350 (LLVM).
+
+The `debuginfo` folder has been left almost untouched by the splitting and is specific to LLVM. Only its high-level features have been traitified.
+
+The new `traits` folder contains 1,500 LOC of trait definitions alone. Overall, the old 27,000-LOC `rustc_codegen_llvm` has been split into a new 18,500-LOC `rustc_codegen_llvm` and a 12,000-LOC `rustc_codegen_ssa`. This refactoring therefore allowed the reuse of approximately 10,000 LOC that would otherwise have had to be duplicated between the multiple backends of `rustc`.
+
+The refactored version of `rustc`'s backend introduced no regressions in the test suite or in performance benchmarks, which is consistent with the nature of the refactoring: it relies only on compile-time parametricity (no trait objects).
diff --git a/src/librustc_codegen_ssa/back/archive.rs b/src/librustc_codegen_ssa/back/archive.rs new file mode 100644 index 0000000000..b5e1deb0d5 --- /dev/null +++ b/src/librustc_codegen_ssa/back/archive.rs @@ -0,0 +1,36 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use rustc::session::Session; + +use std::path::PathBuf; + +pub fn find_library(name: &str, search_paths: &[PathBuf], sess: &Session) + -> PathBuf { + // On Windows, static libraries sometimes show up as libfoo.a and other + // times show up as foo.lib + let oslibname = format!("{}{}{}", + sess.target.target.options.staticlib_prefix, + name, + sess.target.target.options.staticlib_suffix); + let unixlibname = format!("lib{}.a", name); + + for path in search_paths { + debug!("looking for {} inside {:?}", name, path); + let test = path.join(&oslibname); + if test.exists() { return test } + if oslibname != unixlibname { + let test = path.join(&unixlibname); + if test.exists() { return test } + } + } + sess.fatal(&format!("could not find native static library `{}`, \ + perhaps an -L flag is missing?", name)); +} diff --git a/src/librustc_codegen_llvm/back/command.rs b/src/librustc_codegen_ssa/back/command.rs similarity index 100% rename from src/librustc_codegen_llvm/back/command.rs rename to src/librustc_codegen_ssa/back/command.rs diff --git a/src/librustc_codegen_ssa/back/link.rs b/src/librustc_codegen_ssa/back/link.rs new file mode 100644 index 0000000000..24a70dc797 --- /dev/null +++ b/src/librustc_codegen_ssa/back/link.rs @@ -0,0 +1,212 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +/// For all the linkers we support, and information they might +/// need out of the shared crate context before we get rid of it.
+ +use rustc::session::{Session, config}; +use rustc::session::search_paths::PathKind; +use rustc::middle::dependency_format::Linkage; +use rustc::middle::cstore::LibSource; +use rustc_target::spec::LinkerFlavor; +use rustc::hir::def_id::CrateNum; + +use super::command::Command; +use CrateInfo; + +use cc::windows_registry; +use std::fs; +use std::path::{Path, PathBuf}; +use std::env; + +pub fn remove(sess: &Session, path: &Path) { + if let Err(e) = fs::remove_file(path) { + sess.err(&format!("failed to remove {}: {}", + path.display(), + e)); + } +} + +// The third parameter is for env vars, used on windows to set up the +// path for MSVC to find its DLLs, and gcc to find its bundled +// toolchain +pub fn get_linker(sess: &Session, linker: &Path, flavor: LinkerFlavor) -> (PathBuf, Command) { + let msvc_tool = windows_registry::find_tool(&sess.opts.target_triple.triple(), "link.exe"); + + // If our linker looks like a batch script on Windows then to execute this + // we'll need to spawn `cmd` explicitly. This is primarily done to handle + // emscripten where the linker is `emcc.bat` and needs to be spawned as + // `cmd /c emcc.bat ...`. + // + // This worked historically but is needed manually since #42436 (regression + // was tagged as #42791) and some more info can be found on #44443 for + // emscripten itself. + let mut cmd = match linker.to_str() { + Some(linker) if cfg!(windows) && linker.ends_with(".bat") => Command::bat_script(linker), + _ => match flavor { + LinkerFlavor::Lld(f) => Command::lld(linker, f), + LinkerFlavor::Msvc + if sess.opts.cg.linker.is_none() && sess.target.target.options.linker.is_none() => + { + Command::new(msvc_tool.as_ref().map(|t| t.path()).unwrap_or(linker)) + }, + _ => Command::new(linker), + } + }; + + // The compiler's sysroot often has some bundled tools, so add it to the + // PATH for the child. 
+ let mut new_path = sess.host_filesearch(PathKind::All) + .get_tools_search_paths(); + let mut msvc_changed_path = false; + if sess.target.target.options.is_like_msvc { + if let Some(ref tool) = msvc_tool { + cmd.args(tool.args()); + for &(ref k, ref v) in tool.env() { + if k == "PATH" { + new_path.extend(env::split_paths(v)); + msvc_changed_path = true; + } else { + cmd.env(k, v); + } + } + } + } + + if !msvc_changed_path { + if let Some(path) = env::var_os("PATH") { + new_path.extend(env::split_paths(&path)); + } + } + cmd.env("PATH", env::join_paths(new_path).unwrap()); + + (linker.to_path_buf(), cmd) +} + +pub fn each_linked_rlib(sess: &Session, + info: &CrateInfo, + f: &mut dyn FnMut(CrateNum, &Path)) -> Result<(), String> { + let crates = info.used_crates_static.iter(); + let fmts = sess.dependency_formats.borrow(); + let fmts = fmts.get(&config::CrateType::Executable) + .or_else(|| fmts.get(&config::CrateType::Staticlib)) + .or_else(|| fmts.get(&config::CrateType::Cdylib)) + .or_else(|| fmts.get(&config::CrateType::ProcMacro)); + let fmts = match fmts { + Some(f) => f, + None => return Err("could not find formats for rlibs".to_string()) + }; + for &(cnum, ref path) in crates { + match fmts.get(cnum.as_usize() - 1) { + Some(&Linkage::NotLinked) | + Some(&Linkage::IncludedFromDylib) => continue, + Some(_) => {} + None => return Err("could not find formats for rlibs".to_string()) + } + let name = &info.crate_name[&cnum]; + let path = match *path { + LibSource::Some(ref p) => p, + LibSource::MetadataOnly => { + return Err(format!("could not find rlib for: `{}`, found rmeta (metadata) file", + name)) + } + LibSource::None => { + return Err(format!("could not find rlib for: `{}`", name)) + } + }; + f(cnum, &path); + } + Ok(()) +} + +/// Returns a boolean indicating whether the specified crate should be ignored +/// during LTO. +/// +/// Crates ignored during LTO are not lumped together in the "massive object +/// file" that we create and are linked in their normal rlib states. See +/// comments below for what crates do not participate in LTO. +/// +/// It's unusual for a crate to not participate in LTO. Typically only +/// compiler-specific and unstable crates have a reason to not participate in +/// LTO. +pub fn ignored_for_lto(sess: &Session, info: &CrateInfo, cnum: CrateNum) -> bool { + // If our target enables builtin function lowering in LLVM then the + // crates providing these functions don't participate in LTO (e.g. + // no_builtins or compiler builtins crates). 
+ !sess.target.target.options.no_builtins && + (info.compiler_builtins == Some(cnum) || info.is_no_builtins.contains(&cnum)) +} + +pub fn linker_and_flavor(sess: &Session) -> (PathBuf, LinkerFlavor) { + fn infer_from( + sess: &Session, + linker: Option, + flavor: Option, + ) -> Option<(PathBuf, LinkerFlavor)> { + match (linker, flavor) { + (Some(linker), Some(flavor)) => Some((linker, flavor)), + // only the linker flavor is known; use the default linker for the selected flavor + (None, Some(flavor)) => Some((PathBuf::from(match flavor { + LinkerFlavor::Em => if cfg!(windows) { "emcc.bat" } else { "emcc" }, + LinkerFlavor::Gcc => "cc", + LinkerFlavor::Ld => "ld", + LinkerFlavor::Msvc => "link.exe", + LinkerFlavor::Lld(_) => "lld", + }), flavor)), + (Some(linker), None) => { + let stem = if linker.extension().and_then(|ext| ext.to_str()) == Some("exe") { + linker.file_stem().and_then(|stem| stem.to_str()) + } else { + linker.to_str() + }.unwrap_or_else(|| { + sess.fatal("couldn't extract file stem from specified linker"); + }).to_owned(); + + let flavor = if stem == "emcc" { + LinkerFlavor::Em + } else if stem == "gcc" || stem.ends_with("-gcc") { + LinkerFlavor::Gcc + } else if stem == "ld" || stem == "ld.lld" || stem.ends_with("-ld") { + LinkerFlavor::Ld + } else if stem == "link" || stem == "lld-link" { + LinkerFlavor::Msvc + } else if stem == "lld" || stem == "rust-lld" { + LinkerFlavor::Lld(sess.target.target.options.lld_flavor) + } else { + // fall back to the value in the target spec + sess.target.target.linker_flavor + }; + + Some((linker, flavor)) + }, + (None, None) => None, + } + } + + // linker and linker flavor specified via command line have precedence over what the target + // specification specifies + if let Some(ret) = infer_from( + sess, + sess.opts.cg.linker.clone(), + sess.opts.debugging_opts.linker_flavor, + ) { + return ret; + } + + if let Some(ret) = infer_from( + sess, + sess.target.target.options.linker.clone().map(PathBuf::from), + Some(sess.target.target.linker_flavor), + ) { + return ret; + } + + bug!("Not enough information provided to determine how to invoke the linker"); +} diff --git a/src/librustc_codegen_llvm/back/linker.rs b/src/librustc_codegen_ssa/back/linker.rs similarity index 94% rename from src/librustc_codegen_llvm/back/linker.rs rename to src/librustc_codegen_ssa/back/linker.rs index 1998afa80e..f3cc344254 100644 --- a/src/librustc_codegen_llvm/back/linker.rs +++ b/src/librustc_codegen_ssa/back/linker.rs @@ -8,6 +8,10 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use super::symbol_export; +use super::command::Command; +use super::archive; + use rustc_data_structures::fx::FxHashMap; use std::ffi::{OsStr, OsString}; use std::fs::{self, File}; @@ -15,9 +19,6 @@ use std::io::prelude::*; use std::io::{self, BufWriter}; use std::path::{Path, PathBuf}; -use back::archive; -use back::command::Command; -use back::symbol_export; use rustc::hir::def_id::{LOCAL_CRATE, CrateNum}; use rustc::middle::dependency_format::Linkage; use rustc::session::Session; @@ -26,7 +27,6 @@ use rustc::session::config::{self, CrateType, OptLevel, DebugInfo, use rustc::ty::TyCtxt; use rustc_target::spec::{LinkerFlavor, LldFlavor}; use serialize::{json, Encoder}; -use llvm_util; /// For all the linkers we support, and information they might /// need out of the shared crate context before we get rid of it. 
@@ -43,10 +43,13 @@ impl LinkerInfo { } } - pub fn to_linker<'a>(&'a self, - cmd: Command, - sess: &'a Session, - flavor: LinkerFlavor) -> Box { + pub fn to_linker<'a>( + &'a self, + cmd: Command, + sess: &'a Session, + flavor: LinkerFlavor, + target_cpu: &'a str, + ) -> Box { match flavor { LinkerFlavor::Lld(LldFlavor::Link) | LinkerFlavor::Msvc => { @@ -70,6 +73,7 @@ impl LinkerInfo { info: self, hinted_static: false, is_ld: false, + target_cpu, }) as Box } @@ -82,6 +86,7 @@ impl LinkerInfo { info: self, hinted_static: false, is_ld: true, + target_cpu, }) as Box } @@ -144,6 +149,7 @@ pub struct GccLinker<'a> { hinted_static: bool, // Keeps track of the current hinting mode. // Link as ld is_ld: bool, + target_cpu: &'a str, } impl<'a> GccLinker<'a> { @@ -204,14 +210,15 @@ impl<'a> GccLinker<'a> { }; self.linker_arg(&format!("-plugin-opt={}", opt_level)); - self.linker_arg(&format!("-plugin-opt=mcpu={}", llvm_util::target_cpu(self.sess))); + let target_cpu = self.target_cpu; + self.linker_arg(&format!("-plugin-opt=mcpu={}", target_cpu)); } } impl<'a> Linker for GccLinker<'a> { - fn link_dylib(&mut self, lib: &str) { self.hint_dynamic(); self.cmd.arg(format!("-l{}",lib)); } + fn link_dylib(&mut self, lib: &str) { self.hint_dynamic(); self.cmd.arg(format!("-l{}", lib)); } fn link_staticlib(&mut self, lib: &str) { - self.hint_static(); self.cmd.arg(format!("-l{}",lib)); + self.hint_static(); self.cmd.arg(format!("-l{}", lib)); } fn link_rlib(&mut self, lib: &Path) { self.hint_static(); self.cmd.arg(lib); } fn include_path(&mut self, path: &Path) { self.cmd.arg("-L").arg(path); } @@ -228,7 +235,7 @@ impl<'a> Linker for GccLinker<'a> { fn link_rust_dylib(&mut self, lib: &str, _path: &Path) { self.hint_dynamic(); - self.cmd.arg(format!("-l{}",lib)); + self.cmd.arg(format!("-l{}", lib)); } fn link_framework(&mut self, framework: &str) { @@ -246,7 +253,7 @@ impl<'a> Linker for GccLinker<'a> { self.hint_static(); let target = &self.sess.target.target; if !target.options.is_like_osx { - self.linker_arg("--whole-archive").cmd.arg(format!("-l{}",lib)); + self.linker_arg("--whole-archive").cmd.arg(format!("-l{}", lib)); self.linker_arg("--no-whole-archive"); } else { // -force_load is the macOS equivalent of --whole-archive, but it @@ -328,17 +335,13 @@ impl<'a> Linker for GccLinker<'a> { } fn debuginfo(&mut self) { - match self.sess.opts.debuginfo { - DebugInfo::None => { - // If we are building without debuginfo enabled and we were called with - // `-Zstrip-debuginfo-if-disabled=yes`, tell the linker to strip any debuginfo - // found when linking to get rid of symbols from libstd. - match self.sess.opts.debugging_opts.strip_debuginfo_if_disabled { - Some(true) => { self.linker_arg("-S"); }, - _ => {}, - } - }, - _ => {}, + if let DebugInfo::None = self.sess.opts.debuginfo { + // If we are building without debuginfo enabled and we were called with + // `-Zstrip-debuginfo-if-disabled=yes`, tell the linker to strip any debuginfo + // found when linking to get rid of symbols from libstd. + if let Some(true) = self.sess.opts.debugging_opts.strip_debuginfo_if_disabled { + self.linker_arg("-S"); + } }; } @@ -358,8 +361,7 @@ impl<'a> Linker for GccLinker<'a> { // purely to support rustbuild right now, we should get a more // principled solution at some point to force the compiler to pass // the right `-Wl,-install_name` with an `@rpath` in it. 
- if self.sess.opts.cg.rpath || - self.sess.opts.debugging_opts.osx_rpath_install_name { + if self.sess.opts.cg.rpath || self.sess.opts.debugging_opts.osx_rpath_install_name { self.linker_arg("-install_name"); let mut v = OsString::from("@rpath/"); v.push(out_filename.file_name().unwrap()); @@ -446,9 +448,8 @@ impl<'a> Linker for GccLinker<'a> { fn finalize(&mut self) -> Command { self.hint_dynamic(); // Reset to default before returning the composed command line. - let mut cmd = Command::new(""); - ::std::mem::swap(&mut cmd, &mut self.cmd); - cmd + + ::std::mem::replace(&mut self.cmd, Command::new("")) } fn group_start(&mut self) { @@ -604,7 +605,7 @@ impl<'a> Linker for MsvcLinker<'a> { // from the CodeView line tables in the object files. self.cmd.arg("/DEBUG"); - // This will cause the Microsoft linker to embed .natvis info into the the PDB file + // This will cause the Microsoft linker to embed .natvis info into the PDB file let sysroot = self.sess.sysroot(); let natvis_dir_path = sysroot.join("lib\\rustlib\\etc"); if let Ok(natvis_dir) = fs::read_dir(&natvis_dir_path) { @@ -700,9 +701,7 @@ impl<'a> Linker for MsvcLinker<'a> { } fn finalize(&mut self) -> Command { - let mut cmd = Command::new(""); - ::std::mem::swap(&mut cmd, &mut self.cmd); - cmd + ::std::mem::replace(&mut self.cmd, Command::new("")) } // MSVC doesn't need group indicators @@ -850,7 +849,7 @@ impl<'a> Linker for EmLinker<'a> { let res = encoder.emit_seq(symbols.len(), |encoder| { for (i, sym) in symbols.iter().enumerate() { encoder.emit_seq_elt(i, |encoder| { - encoder.emit_str(&("_".to_string() + sym)) + encoder.emit_str(&("_".to_owned() + sym)) })?; } Ok(()) @@ -870,9 +869,7 @@ impl<'a> Linker for EmLinker<'a> { } fn finalize(&mut self) -> Command { - let mut cmd = Command::new(""); - ::std::mem::swap(&mut cmd, &mut self.cmd); - cmd + ::std::mem::replace(&mut self.cmd, Command::new("")) } // Appears not necessary on Emscripten @@ -884,35 +881,6 @@ impl<'a> Linker for EmLinker<'a> { } } -fn exported_symbols(tcx: TyCtxt, crate_type: CrateType) -> Vec { - let mut symbols = Vec::new(); - - let export_threshold = symbol_export::crates_export_threshold(&[crate_type]); - for &(symbol, level) in tcx.exported_symbols(LOCAL_CRATE).iter() { - if level.is_below_threshold(export_threshold) { - symbols.push(symbol.symbol_name(tcx).to_string()); - } - } - - let formats = tcx.sess.dependency_formats.borrow(); - let deps = formats[&crate_type].iter(); - - for (index, dep_format) in deps.enumerate() { - let cnum = CrateNum::new(index + 1); - // For each dependency that we are linking to statically ... - if *dep_format == Linkage::Static { - // ... we add its symbol list to our export list. - for &(symbol, level) in tcx.exported_symbols(cnum).iter() { - if level.is_below_threshold(export_threshold) { - symbols.push(symbol.symbol_name(tcx).to_string()); - } - } - } - } - - symbols -} - pub struct WasmLd<'a> { cmd: Command, sess: &'a Session, @@ -1069,9 +1037,18 @@ impl<'a> Linker for WasmLd<'a> { // indicative of bugs, let's prevent them. self.cmd.arg("--fatal-warnings"); - let mut cmd = Command::new(""); - ::std::mem::swap(&mut cmd, &mut self.cmd); - cmd + // The symbol visibility story is a bit in flux right now with LLD. + // It's... not entirely clear to me what's going on, but this looks to + // make everything work when `export_symbols` isn't otherwise called for + // things like executables. + self.cmd.arg("--export-dynamic"); + + // LLD only implements C++-like demangling, which doesn't match our own + // mangling scheme. 
Tell LLD to not demangle anything and leave it up to + // us to demangle these symbols later. + self.cmd.arg("--no-demangle"); + + ::std::mem::replace(&mut self.cmd, Command::new("")) } // Not needed for now with LLD @@ -1082,3 +1059,36 @@ impl<'a> Linker for WasmLd<'a> { // Do nothing for now } } + +fn exported_symbols(tcx: TyCtxt, crate_type: CrateType) -> Vec { + if let Some(ref exports) = tcx.sess.target.target.options.override_export_symbols { + return exports.clone() + } + + let mut symbols = Vec::new(); + + let export_threshold = symbol_export::crates_export_threshold(&[crate_type]); + for &(symbol, level) in tcx.exported_symbols(LOCAL_CRATE).iter() { + if level.is_below_threshold(export_threshold) { + symbols.push(symbol.symbol_name(tcx).to_string()); + } + } + + let formats = tcx.sess.dependency_formats.borrow(); + let deps = formats[&crate_type].iter(); + + for (index, dep_format) in deps.enumerate() { + let cnum = CrateNum::new(index + 1); + // For each dependency that we are linking to statically ... + if *dep_format == Linkage::Static { + // ... we add its symbol list to our export list. + for &(symbol, level) in tcx.exported_symbols(cnum).iter() { + if level.is_below_threshold(export_threshold) { + symbols.push(symbol.symbol_name(tcx).to_string()); + } + } + } + } + + symbols +} diff --git a/src/librustc_codegen_ssa/back/lto.rs b/src/librustc_codegen_ssa/back/lto.rs new file mode 100644 index 0000000000..8d03edca00 --- /dev/null +++ b/src/librustc_codegen_ssa/back/lto.rs @@ -0,0 +1,122 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use super::write::CodegenContext; +use traits::*; +use ModuleCodegen; + +use rustc::util::time_graph::Timeline; +use rustc_errors::FatalError; + +use std::sync::Arc; +use std::ffi::CString; + +pub struct ThinModule { + pub shared: Arc>, + pub idx: usize, +} + +impl ThinModule { + pub fn name(&self) -> &str { + self.shared.module_names[self.idx].to_str().unwrap() + } + + pub fn cost(&self) -> u64 { + // Yes, that's correct, we're using the size of the bytecode as an + // indicator for how costly this codegen unit is. + self.data().len() as u64 + } + + pub fn data(&self) -> &[u8] { + let a = self.shared.thin_buffers.get(self.idx).map(|b| b.data()); + a.unwrap_or_else(|| { + let len = self.shared.thin_buffers.len(); + self.shared.serialized_modules[self.idx - len].data() + }) + } +} + +pub struct ThinShared { + pub data: B::ThinData, + pub thin_buffers: Vec, + pub serialized_modules: Vec>, + pub module_names: Vec, +} + + +pub enum LtoModuleCodegen { + Fat { + module: Option>, + _serialized_bitcode: Vec>, + }, + + Thin(ThinModule), +} + +impl LtoModuleCodegen { + pub fn name(&self) -> &str { + match *self { + LtoModuleCodegen::Fat { .. } => "everything", + LtoModuleCodegen::Thin(ref m) => m.name(), + } + } + + /// Optimize this module within the given codegen context. + /// + /// This function is unsafe as it'll return a `ModuleCodegen` still + /// points to LLVM data structures owned by this `LtoModuleCodegen`. + /// It's intended that the module returned is immediately code generated and + /// dropped, and then this LTO module is dropped. 
+ pub unsafe fn optimize( + &mut self, + cgcx: &CodegenContext, + timeline: &mut Timeline + ) -> Result, FatalError> { + match *self { + LtoModuleCodegen::Fat { ref mut module, .. } => { + let module = module.take().unwrap(); + { + let config = cgcx.config(module.kind); + B::run_lto_pass_manager(cgcx, &module, config, false); + timeline.record("fat-done"); + } + Ok(module) + } + LtoModuleCodegen::Thin(ref mut thin) => B::optimize_thin(cgcx, thin, timeline), + } + } + + /// A "gauge" of how costly it is to optimize this module, used to sort + /// biggest modules first. + pub fn cost(&self) -> u64 { + match *self { + // Only one module with fat LTO, so the cost doesn't matter. + LtoModuleCodegen::Fat { .. } => 0, + LtoModuleCodegen::Thin(ref m) => m.cost(), + } + } +} + + +pub enum SerializedModule { + Local(M), + FromRlib(Vec), + FromUncompressedFile(memmap::Mmap), +} + +impl SerializedModule { + pub fn data(&self) -> &[u8] { + match *self { + SerializedModule::Local(ref m) => m.data(), + SerializedModule::FromRlib(ref m) => m, + SerializedModule::FromUncompressedFile(ref m) => m, + } + } +} diff --git a/src/librustc_codegen_ssa/back/mod.rs b/src/librustc_codegen_ssa/back/mod.rs new file mode 100644 index 0000000000..3d7ead74d1 --- /dev/null +++ b/src/librustc_codegen_ssa/back/mod.rs @@ -0,0 +1,17 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +pub mod write; +pub mod linker; +pub mod lto; +pub mod link; +pub mod command; +pub mod symbol_export; +pub mod archive; diff --git a/src/librustc_codegen_llvm/back/symbol_export.rs b/src/librustc_codegen_ssa/back/symbol_export.rs similarity index 94% rename from src/librustc_codegen_llvm/back/symbol_export.rs rename to src/librustc_codegen_ssa/back/symbol_export.rs index 6b1b0b94fd..d25917e51b 100644 --- a/src/librustc_codegen_llvm/back/symbol_export.rs +++ b/src/librustc_codegen_ssa/back/symbol_export.rs @@ -11,7 +11,7 @@ use rustc_data_structures::sync::Lrc; use std::sync::Arc; -use monomorphize::Instance; +use rustc::ty::Instance; use rustc::hir; use rustc::hir::Node; use rustc::hir::CodegenFnAttrFlags; @@ -47,11 +47,10 @@ fn crate_export_threshold(crate_type: config::CrateType) -> SymbolExportLevel { } } -pub fn crates_export_threshold(crate_types: &[config::CrateType]) - -> SymbolExportLevel { - if crate_types.iter().any(|&crate_type| { - crate_export_threshold(crate_type) == SymbolExportLevel::Rust - }) { +pub fn crates_export_threshold(crate_types: &[config::CrateType]) -> SymbolExportLevel { + if crate_types.iter().any(|&crate_type| + crate_export_threshold(crate_type) == SymbolExportLevel::Rust) + { SymbolExportLevel::Rust } else { SymbolExportLevel::C @@ -65,7 +64,7 @@ fn reachable_non_generics_provider<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, assert_eq!(cnum, LOCAL_CRATE); if !tcx.sess.opts.output_types.should_codegen() { - return Lrc::new(DefIdMap()) + return Default::default(); } // Check to see if this crate is a "special runtime crate". 
These @@ -158,7 +157,7 @@ fn reachable_non_generics_provider<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, }) .collect(); - if let Some(id) = *tcx.sess.derive_registrar_fn.get() { + if let Some(id) = *tcx.sess.proc_macro_decls_static.get() { let def_id = tcx.hir.local_def_id(id); reachable_non_generics.insert(def_id, SymbolExportLevel::C); } @@ -230,10 +229,11 @@ fn exported_symbols_provider_local<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, "__llvm_profile_raw_version", "__llvm_profile_filename", ]; - for sym in &PROFILER_WEAK_SYMBOLS { + + symbols.extend(PROFILER_WEAK_SYMBOLS.iter().map(|sym| { let exported_symbol = ExportedSymbol::NoDefId(SymbolName::new(sym)); - symbols.push((exported_symbol, SymbolExportLevel::C)); - } + (exported_symbol, SymbolExportLevel::C) + })); } if tcx.sess.crate_types.borrow().contains(&config::CrateType::Dylib) { @@ -300,7 +300,7 @@ fn upstream_monomorphizations_provider<'a, 'tcx>( let cnums = tcx.all_crate_nums(LOCAL_CRATE); - let mut instances: DefIdMap> = DefIdMap(); + let mut instances: DefIdMap> = Default::default(); let cnum_stable_ids: IndexVec = { let mut cnum_stable_ids = IndexVec::from_elem_n(Fingerprint::ZERO, @@ -359,7 +359,7 @@ fn is_unreachable_local_definition_provider(tcx: TyCtxt, def_id: DefId) -> bool !tcx.reachable_set(LOCAL_CRATE).0.contains(&node_id) } else { bug!("is_unreachable_local_definition called with non-local DefId: {:?}", - def_id) + def_id) } } @@ -388,6 +388,16 @@ fn symbol_export_level(tcx: TyCtxt, sym_def_id: DefId) -> SymbolExportLevel { codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL); if is_extern && !std_internal { + // Emscripten cannot export statics, so reduce their export level here + if tcx.sess.target.target.options.is_like_emscripten { + if let Some(Node::Item(&hir::Item { + node: hir::ItemKind::Static(..), + .. + })) = tcx.hir.get_if_local(sym_def_id) { + return SymbolExportLevel::Rust; + } + } + SymbolExportLevel::C } else { SymbolExportLevel::Rust diff --git a/src/librustc_codegen_ssa/back/write.rs b/src/librustc_codegen_ssa/back/write.rs new file mode 100644 index 0000000000..46aee5339b --- /dev/null +++ b/src/librustc_codegen_ssa/back/write.rs @@ -0,0 +1,1853 @@ +// Copyright 2013-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use {ModuleCodegen, ModuleKind, CachedModuleCodegen, CompiledModule, CrateInfo, CodegenResults, + RLIB_BYTECODE_EXTENSION}; +use super::linker::LinkerInfo; +use super::lto::{self, SerializedModule}; +use super::link::{self, remove, get_linker}; +use super::command::Command; +use super::symbol_export::ExportedSymbols; + +use memmap; +use rustc_incremental::{copy_cgu_workproducts_to_incr_comp_cache_dir, + in_incr_comp_dir, in_incr_comp_dir_sess}; +use rustc::dep_graph::{WorkProduct, WorkProductId, WorkProductFileKind}; +use rustc::dep_graph::cgu_reuse_tracker::CguReuseTracker; +use rustc::middle::cstore::EncodedMetadata; +use rustc::session::config::{self, OutputFilenames, OutputType, Passes, Sanitizer, Lto}; +use rustc::session::Session; +use rustc::util::nodemap::FxHashMap; +use rustc::util::time_graph::{self, TimeGraph, Timeline}; +use traits::*; +use rustc::hir::def_id::{CrateNum, LOCAL_CRATE}; +use rustc::ty::TyCtxt; +use rustc::util::common::{time_depth, set_time_depth, print_time_passes_entry}; +use rustc_fs_util::link_or_copy; +use rustc_data_structures::svh::Svh; +use rustc_errors::{Handler, Level, DiagnosticBuilder, FatalError, DiagnosticId}; +use rustc_errors::emitter::{Emitter}; +use syntax::attr; +use syntax::ext::hygiene::Mark; +use syntax_pos::MultiSpan; +use syntax_pos::symbol::Symbol; +use jobserver::{Client, Acquired}; + +use std::any::Any; +use std::fs; +use std::io; +use std::mem; +use std::path::{Path, PathBuf}; +use std::str; +use std::sync::Arc; +use std::sync::mpsc::{channel, Sender, Receiver}; +use std::time::Instant; +use std::thread; + +const PRE_THIN_LTO_BC_EXT: &str = "pre-thin-lto.bc"; + +/// Module-specific configuration for `optimize_and_codegen`. +pub struct ModuleConfig { + /// Names of additional optimization passes to run. + pub passes: Vec, + /// Some(level) to optimize at a certain level, or None to run + /// absolutely no optimizations (used for the metadata module). + pub opt_level: Option, + + /// Some(level) to optimize binary size, or None to not affect program size. + pub opt_size: Option, + + pub pgo_gen: Option, + pub pgo_use: String, + + // Flags indicating which outputs to produce. + pub emit_pre_thin_lto_bc: bool, + pub emit_no_opt_bc: bool, + pub emit_bc: bool, + pub emit_bc_compressed: bool, + pub emit_lto_bc: bool, + pub emit_ir: bool, + pub emit_asm: bool, + pub emit_obj: bool, + // Miscellaneous flags. These are mostly copied from command-line + // options. + pub verify_llvm_ir: bool, + pub no_prepopulate_passes: bool, + pub no_builtins: bool, + pub time_passes: bool, + pub vectorize_loop: bool, + pub vectorize_slp: bool, + pub merge_functions: bool, + pub inline_threshold: Option, + // Instead of creating an object file by doing LLVM codegen, just + // make the object file bitcode. Provides easy compatibility with + // emscripten's ecc compiler, when used as the linker. 
+ pub obj_is_bitcode: bool, + pub no_integrated_as: bool, + pub embed_bitcode: bool, + pub embed_bitcode_marker: bool, +} + +impl ModuleConfig { + fn new(passes: Vec) -> ModuleConfig { + ModuleConfig { + passes, + opt_level: None, + opt_size: None, + + pgo_gen: None, + pgo_use: String::new(), + + emit_no_opt_bc: false, + emit_pre_thin_lto_bc: false, + emit_bc: false, + emit_bc_compressed: false, + emit_lto_bc: false, + emit_ir: false, + emit_asm: false, + emit_obj: false, + obj_is_bitcode: false, + embed_bitcode: false, + embed_bitcode_marker: false, + no_integrated_as: false, + + verify_llvm_ir: false, + no_prepopulate_passes: false, + no_builtins: false, + time_passes: false, + vectorize_loop: false, + vectorize_slp: false, + merge_functions: false, + inline_threshold: None + } + } + + fn set_flags(&mut self, sess: &Session, no_builtins: bool) { + self.verify_llvm_ir = sess.verify_llvm_ir(); + self.no_prepopulate_passes = sess.opts.cg.no_prepopulate_passes; + self.no_builtins = no_builtins || sess.target.target.options.no_builtins; + self.time_passes = sess.time_passes(); + self.inline_threshold = sess.opts.cg.inline_threshold; + self.obj_is_bitcode = sess.target.target.options.obj_is_bitcode || + sess.opts.debugging_opts.cross_lang_lto.enabled(); + let embed_bitcode = sess.target.target.options.embed_bitcode || + sess.opts.debugging_opts.embed_bitcode; + if embed_bitcode { + match sess.opts.optimize { + config::OptLevel::No | + config::OptLevel::Less => { + self.embed_bitcode_marker = embed_bitcode; + } + _ => self.embed_bitcode = embed_bitcode, + } + } + + // Copy what clang does by turning on loop vectorization at O2 and + // slp vectorization at O3. Otherwise configure other optimization aspects + // of this pass manager builder. + // Turn off vectorization for emscripten, as it's not very well supported. + self.vectorize_loop = !sess.opts.cg.no_vectorize_loops && + (sess.opts.optimize == config::OptLevel::Default || + sess.opts.optimize == config::OptLevel::Aggressive) && + !sess.target.target.options.is_like_emscripten; + + self.vectorize_slp = !sess.opts.cg.no_vectorize_slp && + sess.opts.optimize == config::OptLevel::Aggressive && + !sess.target.target.options.is_like_emscripten; + + self.merge_functions = sess.opts.optimize == config::OptLevel::Default || + sess.opts.optimize == config::OptLevel::Aggressive; + } + + pub fn bitcode_needed(&self) -> bool { + self.emit_bc || self.obj_is_bitcode + || self.emit_bc_compressed || self.embed_bitcode + } +} + +/// Assembler name and command used by codegen when no_integrated_as is enabled +pub struct AssemblerCommand { + name: PathBuf, + cmd: Command, +} + +// HACK(eddyb) work around `#[derive]` producing wrong bounds for `Clone`. 
+pub struct TargetMachineFactory( + pub Arc Result + Send + Sync>, +); + +impl Clone for TargetMachineFactory { + fn clone(&self) -> Self { + TargetMachineFactory(self.0.clone()) + } +} + +/// Additional resources used by optimize_and_codegen (not module specific) +#[derive(Clone)] +pub struct CodegenContext { + // Resources needed when running LTO + pub backend: B, + pub time_passes: bool, + pub lto: Lto, + pub no_landing_pads: bool, + pub save_temps: bool, + pub fewer_names: bool, + pub exported_symbols: Option>, + pub opts: Arc, + pub crate_types: Vec, + pub each_linked_rlib_for_lto: Vec<(CrateNum, PathBuf)>, + pub output_filenames: Arc, + pub regular_module_config: Arc, + pub metadata_module_config: Arc, + pub allocator_module_config: Arc, + pub tm_factory: TargetMachineFactory, + pub msvc_imps_needed: bool, + pub target_pointer_width: String, + pub debuginfo: config::DebugInfo, + + // Number of cgus excluding the allocator/metadata modules + pub total_cgus: usize, + // Handler to use for diagnostics produced during codegen. + pub diag_emitter: SharedEmitter, + // LLVM passes added by plugins. + pub plugin_passes: Vec, + // LLVM optimizations for which we want to print remarks. + pub remark: Passes, + // Worker thread number + pub worker: usize, + // The incremental compilation session directory, or None if we are not + // compiling incrementally + pub incr_comp_session_dir: Option, + // Used to update CGU re-use information during the thinlto phase. + pub cgu_reuse_tracker: CguReuseTracker, + // Channel back to the main control thread to send messages to + pub coordinator_send: Sender>, + // A reference to the TimeGraph so we can register timings. None means that + // measuring is disabled. + pub time_graph: Option, + // The assembler command if no_integrated_as option is enabled, None otherwise + pub assembler_cmd: Option> +} + +impl CodegenContext { + pub fn create_diag_handler(&self) -> Handler { + Handler::with_emitter(true, false, Box::new(self.diag_emitter.clone())) + } + + pub fn config(&self, kind: ModuleKind) -> &ModuleConfig { + match kind { + ModuleKind::Regular => &self.regular_module_config, + ModuleKind::Metadata => &self.metadata_module_config, + ModuleKind::Allocator => &self.allocator_module_config, + } + } +} + +fn generate_lto_work( + cgcx: &CodegenContext, + modules: Vec>, + import_only_modules: Vec<(SerializedModule, WorkProduct)> +) -> Vec<(WorkItem, u64)> { + let mut timeline = cgcx.time_graph.as_ref().map(|tg| { + tg.start(CODEGEN_WORKER_TIMELINE, + CODEGEN_WORK_PACKAGE_KIND, + "generate lto") + }).unwrap_or(Timeline::noop()); + let (lto_modules, copy_jobs) = B::run_lto(cgcx, modules, import_only_modules, &mut timeline) + .unwrap_or_else(|e| e.raise()); + + let lto_modules = lto_modules.into_iter().map(|module| { + let cost = module.cost(); + (WorkItem::LTO(module), cost) + }); + + let copy_jobs = copy_jobs.into_iter().map(|wp| { + (WorkItem::CopyPostLtoArtifacts(CachedModuleCodegen { + name: wp.cgu_name.clone(), + source: wp, + }), 0) + }); + + lto_modules.chain(copy_jobs).collect() +} + +pub struct CompiledModules { + pub modules: Vec, + pub metadata_module: CompiledModule, + pub allocator_module: Option, +} + +fn need_crate_bitcode_for_rlib(sess: &Session) -> bool { + sess.crate_types.borrow().contains(&config::CrateType::Rlib) && + sess.opts.output_types.contains_key(&OutputType::Exe) +} + +fn need_pre_thin_lto_bitcode_for_incr_comp(sess: &Session) -> bool { + if sess.opts.incremental.is_none() { + return false + } + + match sess.lto() { + Lto::Fat | + 
Lto::No => false, + Lto::Thin | + Lto::ThinLocal => true, + } +} + +pub fn start_async_codegen( + backend: B, + tcx: TyCtxt, + time_graph: Option, + metadata: EncodedMetadata, + coordinator_receive: Receiver>, + total_cgus: usize +) -> OngoingCodegen { + let sess = tcx.sess; + let crate_name = tcx.crate_name(LOCAL_CRATE); + let crate_hash = tcx.crate_hash(LOCAL_CRATE); + let no_builtins = attr::contains_name(&tcx.hir.krate().attrs, "no_builtins"); + let subsystem = attr::first_attr_value_str_by_name(&tcx.hir.krate().attrs, + "windows_subsystem"); + let windows_subsystem = subsystem.map(|subsystem| { + if subsystem != "windows" && subsystem != "console" { + tcx.sess.fatal(&format!("invalid windows subsystem `{}`, only \ + `windows` and `console` are allowed", + subsystem)); + } + subsystem.to_string() + }); + + let linker_info = LinkerInfo::new(tcx); + let crate_info = CrateInfo::new(tcx); + + // Figure out what we actually need to build. + let mut modules_config = ModuleConfig::new(sess.opts.cg.passes.clone()); + let mut metadata_config = ModuleConfig::new(vec![]); + let mut allocator_config = ModuleConfig::new(vec![]); + + if let Some(ref sanitizer) = sess.opts.debugging_opts.sanitizer { + match *sanitizer { + Sanitizer::Address => { + modules_config.passes.push("asan".to_owned()); + modules_config.passes.push("asan-module".to_owned()); + } + Sanitizer::Memory => { + modules_config.passes.push("msan".to_owned()) + } + Sanitizer::Thread => { + modules_config.passes.push("tsan".to_owned()) + } + _ => {} + } + } + + if sess.opts.debugging_opts.profile { + modules_config.passes.push("insert-gcov-profiling".to_owned()) + } + + modules_config.pgo_gen = sess.opts.debugging_opts.pgo_gen.clone(); + modules_config.pgo_use = sess.opts.debugging_opts.pgo_use.clone(); + + modules_config.opt_level = Some(sess.opts.optimize); + modules_config.opt_size = Some(sess.opts.optimize); + + // Save all versions of the bytecode if we're saving our temporaries. + if sess.opts.cg.save_temps { + modules_config.emit_no_opt_bc = true; + modules_config.emit_pre_thin_lto_bc = true; + modules_config.emit_bc = true; + modules_config.emit_lto_bc = true; + metadata_config.emit_bc = true; + allocator_config.emit_bc = true; + } + + // Emit compressed bitcode files for the crate if we're emitting an rlib. + // Whenever an rlib is created, the bitcode is inserted into the archive in + // order to allow LTO against it. + if need_crate_bitcode_for_rlib(sess) { + modules_config.emit_bc_compressed = true; + allocator_config.emit_bc_compressed = true; + } + + modules_config.emit_pre_thin_lto_bc = + need_pre_thin_lto_bitcode_for_incr_comp(sess); + + modules_config.no_integrated_as = tcx.sess.opts.cg.no_integrated_as || + tcx.sess.target.target.options.no_integrated_as; + + for output_type in sess.opts.output_types.keys() { + match *output_type { + OutputType::Bitcode => { modules_config.emit_bc = true; } + OutputType::LlvmAssembly => { modules_config.emit_ir = true; } + OutputType::Assembly => { + modules_config.emit_asm = true; + // If we're not using the LLVM assembler, this function + // could be invoked specially with output_type_assembly, so + // in this case we still want the metadata object file. 
+ if !sess.opts.output_types.contains_key(&OutputType::Assembly) { + metadata_config.emit_obj = true; + allocator_config.emit_obj = true; + } + } + OutputType::Object => { modules_config.emit_obj = true; } + OutputType::Metadata => { metadata_config.emit_obj = true; } + OutputType::Exe => { + modules_config.emit_obj = true; + metadata_config.emit_obj = true; + allocator_config.emit_obj = true; + }, + OutputType::Mir => {} + OutputType::DepInfo => {} + } + } + + modules_config.set_flags(sess, no_builtins); + metadata_config.set_flags(sess, no_builtins); + allocator_config.set_flags(sess, no_builtins); + + // Exclude metadata and allocator modules from time_passes output, since + // they throw off the "LLVM passes" measurement. + metadata_config.time_passes = false; + allocator_config.time_passes = false; + + let (shared_emitter, shared_emitter_main) = SharedEmitter::new(); + let (codegen_worker_send, codegen_worker_receive) = channel(); + + let coordinator_thread = start_executing_work(backend.clone(), + tcx, + &crate_info, + shared_emitter, + codegen_worker_send, + coordinator_receive, + total_cgus, + sess.jobserver.clone(), + time_graph.clone(), + Arc::new(modules_config), + Arc::new(metadata_config), + Arc::new(allocator_config)); + + OngoingCodegen { + backend, + crate_name, + crate_hash, + metadata, + windows_subsystem, + linker_info, + crate_info, + + time_graph, + coordinator_send: tcx.tx_to_llvm_workers.lock().clone(), + codegen_worker_receive, + shared_emitter_main, + future: coordinator_thread, + output_filenames: tcx.output_filenames(LOCAL_CRATE), + } +} + +fn copy_all_cgu_workproducts_to_incr_comp_cache_dir( + sess: &Session, + compiled_modules: &CompiledModules, +) -> FxHashMap { + let mut work_products = FxHashMap::default(); + + if sess.opts.incremental.is_none() { + return work_products; + } + + for module in compiled_modules.modules.iter().filter(|m| m.kind == ModuleKind::Regular) { + let mut files = vec![]; + + if let Some(ref path) = module.object { + files.push((WorkProductFileKind::Object, path.clone())); + } + if let Some(ref path) = module.bytecode { + files.push((WorkProductFileKind::Bytecode, path.clone())); + } + if let Some(ref path) = module.bytecode_compressed { + files.push((WorkProductFileKind::BytecodeCompressed, path.clone())); + } + + if let Some((id, product)) = + copy_cgu_workproducts_to_incr_comp_cache_dir(sess, &module.name, &files) { + work_products.insert(id, product); + } + } + + work_products +} + +fn produce_final_output_artifacts(sess: &Session, + compiled_modules: &CompiledModules, + crate_output: &OutputFilenames) { + let mut user_wants_bitcode = false; + let mut user_wants_objects = false; + + // Produce final compile outputs. + let copy_gracefully = |from: &Path, to: &Path| { + if let Err(e) = fs::copy(from, to) { + sess.err(&format!("could not copy {:?} to {:?}: {}", from, to, e)); + } + }; + + let copy_if_one_unit = |output_type: OutputType, + keep_numbered: bool| { + if compiled_modules.modules.len() == 1 { + // 1) Only one codegen unit. In this case it's no difficulty + // to copy `foo.0.x` to `foo.x`. + let module_name = Some(&compiled_modules.modules[0].name[..]); + let path = crate_output.temp_path(output_type, module_name); + copy_gracefully(&path, + &crate_output.path(output_type)); + if !sess.opts.cg.save_temps && !keep_numbered { + // The user just wants `foo.x`, not `foo.#module-name#.x`. 
+ remove(sess, &path); + } + } else { + let ext = crate_output.temp_path(output_type, None) + .extension() + .unwrap() + .to_str() + .unwrap() + .to_owned(); + + if crate_output.outputs.contains_key(&output_type) { + // 2) Multiple codegen units, with `--emit foo=some_name`. We have + // no good solution for this case, so warn the user. + sess.warn(&format!("ignoring emit path because multiple .{} files \ + were produced", ext)); + } else if crate_output.single_output_file.is_some() { + // 3) Multiple codegen units, with `-o some_name`. We have + // no good solution for this case, so warn the user. + sess.warn(&format!("ignoring -o because multiple .{} files \ + were produced", ext)); + } else { + // 4) Multiple codegen units, but no explicit name. We + // just leave the `foo.0.x` files in place. + // (We don't have to do any work in this case.) + } + } + }; + + // Flag to indicate whether the user explicitly requested bitcode. + // Otherwise, we produced it only as a temporary output, and will need + // to get rid of it. + for output_type in crate_output.outputs.keys() { + match *output_type { + OutputType::Bitcode => { + user_wants_bitcode = true; + // Copy to .bc, but always keep the .0.bc. There is a later + // check to figure out if we should delete .0.bc files, or keep + // them for making an rlib. + copy_if_one_unit(OutputType::Bitcode, true); + } + OutputType::LlvmAssembly => { + copy_if_one_unit(OutputType::LlvmAssembly, false); + } + OutputType::Assembly => { + copy_if_one_unit(OutputType::Assembly, false); + } + OutputType::Object => { + user_wants_objects = true; + copy_if_one_unit(OutputType::Object, true); + } + OutputType::Mir | + OutputType::Metadata | + OutputType::Exe | + OutputType::DepInfo => {} + } + } + + // Clean up unwanted temporary files. + + // We create the following files by default: + // - #crate#.#module-name#.bc + // - #crate#.#module-name#.o + // - #crate#.crate.metadata.bc + // - #crate#.crate.metadata.o + // - #crate#.o (linked from crate.##.o) + // - #crate#.bc (copied from crate.##.bc) + // We may create additional files if requested by the user (through + // `-C save-temps` or `--emit=` flags). + + if !sess.opts.cg.save_temps { + // Remove the temporary .#module-name#.o objects. If the user didn't + // explicitly request bitcode (with --emit=bc), and the bitcode is not + // needed for building an rlib, then we must remove .#module-name#.bc as + // well. + + // Specific rules for keeping .#module-name#.bc: + // - If the user requested bitcode (`user_wants_bitcode`), and + // codegen_units > 1, then keep it. + // - If the user requested bitcode but codegen_units == 1, then we + // can toss .#module-name#.bc because we copied it to .bc earlier. + // - If we're not building an rlib and the user didn't request + // bitcode, then delete .#module-name#.bc. + // If you change how this works, also update back::link::link_rlib, + // where .#module-name#.bc files are (maybe) deleted after making an + // rlib. 
+ let needs_crate_object = crate_output.outputs.contains_key(&OutputType::Exe); + + let keep_numbered_bitcode = user_wants_bitcode && sess.codegen_units() > 1; + + let keep_numbered_objects = needs_crate_object || + (user_wants_objects && sess.codegen_units() > 1); + + for module in compiled_modules.modules.iter() { + if let Some(ref path) = module.object { + if !keep_numbered_objects { + remove(sess, path); + } + } + + if let Some(ref path) = module.bytecode { + if !keep_numbered_bitcode { + remove(sess, path); + } + } + } + + if !user_wants_bitcode { + if let Some(ref path) = compiled_modules.metadata_module.bytecode { + remove(sess, &path); + } + + if let Some(ref allocator_module) = compiled_modules.allocator_module { + if let Some(ref path) = allocator_module.bytecode { + remove(sess, path); + } + } + } + } + + // We leave the following files around by default: + // - #crate#.o + // - #crate#.crate.metadata.o + // - #crate#.bc + // These are used in linking steps and will be cleaned up afterward. +} + +pub fn dump_incremental_data(_codegen_results: &CodegenResults) { + // FIXME(mw): This does not work at the moment because the situation has + // become more complicated due to incremental LTO. Now a CGU + // can have more than two caching states. + // println!("[incremental] Re-using {} out of {} modules", + // codegen_results.modules.iter().filter(|m| m.pre_existing).count(), + // codegen_results.modules.len()); +} + +pub enum WorkItem { + /// Optimize a newly codegened, totally unoptimized module. + Optimize(ModuleCodegen), + /// Copy the post-LTO artifacts from the incremental cache to the output + /// directory. + CopyPostLtoArtifacts(CachedModuleCodegen), + /// Perform (Thin)LTO on the given module. + LTO(lto::LtoModuleCodegen), +} + +impl WorkItem { + pub fn module_kind(&self) -> ModuleKind { + match *self { + WorkItem::Optimize(ref m) => m.kind, + WorkItem::CopyPostLtoArtifacts(_) | + WorkItem::LTO(_) => ModuleKind::Regular, + } + } + + pub fn name(&self) -> String { + match *self { + WorkItem::Optimize(ref m) => format!("optimize: {}", m.name), + WorkItem::CopyPostLtoArtifacts(ref m) => format!("copy post LTO artifacts: {}", m.name), + WorkItem::LTO(ref m) => format!("lto: {}", m.name()), + } + } +} + +enum WorkItemResult { + Compiled(CompiledModule), + NeedsLTO(ModuleCodegen), +} + +fn execute_work_item( + cgcx: &CodegenContext, + work_item: WorkItem, + timeline: &mut Timeline +) -> Result, FatalError> { + let module_config = cgcx.config(work_item.module_kind()); + + match work_item { + WorkItem::Optimize(module) => { + execute_optimize_work_item(cgcx, module, module_config, timeline) + } + WorkItem::CopyPostLtoArtifacts(module) => { + execute_copy_from_cache_work_item(cgcx, module, module_config, timeline) + } + WorkItem::LTO(module) => { + execute_lto_work_item(cgcx, module, module_config, timeline) + } + } +} + +fn execute_optimize_work_item( + cgcx: &CodegenContext, + module: ModuleCodegen, + module_config: &ModuleConfig, + timeline: &mut Timeline +) -> Result, FatalError> { + let diag_handler = cgcx.create_diag_handler(); + + unsafe { + B::optimize(cgcx, &diag_handler, &module, module_config, timeline)?; + } + + let linker_does_lto = cgcx.opts.debugging_opts.cross_lang_lto.enabled(); + + // After we've done the initial round of optimizations we need to + // decide whether to synchronously codegen this module or ship it + // back to the coordinator thread for further LTO processing (which + // has to wait for all the initial modules to be optimized). 
+ // + // Here we dispatch based on the `cgcx.lto` and kind of module we're + // codegenning... + let needs_lto = match cgcx.lto { + Lto::No => false, + + // If the linker does LTO, we don't have to do it. Note that we + // keep doing full LTO, if it is requested, as not to break the + // assumption that the output will be a single module. + Lto::Thin | Lto::ThinLocal if linker_does_lto => false, + + // Here we've got a full crate graph LTO requested. We ignore + // this, however, if the crate type is only an rlib as there's + // no full crate graph to process, that'll happen later. + // + // This use case currently comes up primarily for targets that + // require LTO so the request for LTO is always unconditionally + // passed down to the backend, but we don't actually want to do + // anything about it yet until we've got a final product. + Lto::Fat | Lto::Thin => { + cgcx.crate_types.len() != 1 || + cgcx.crate_types[0] != config::CrateType::Rlib + } + + // When we're automatically doing ThinLTO for multi-codegen-unit + // builds we don't actually want to LTO the allocator modules if + // it shows up. This is due to various linker shenanigans that + // we'll encounter later. + Lto::ThinLocal => { + module.kind != ModuleKind::Allocator + } + }; + + // Metadata modules never participate in LTO regardless of the lto + // settings. + let needs_lto = needs_lto && module.kind != ModuleKind::Metadata; + + if needs_lto { + Ok(WorkItemResult::NeedsLTO(module)) + } else { + let module = unsafe { B::codegen(cgcx, &diag_handler, module, module_config, timeline)? }; + Ok(WorkItemResult::Compiled(module)) + } +} + +fn execute_copy_from_cache_work_item( + cgcx: &CodegenContext, + module: CachedModuleCodegen, + module_config: &ModuleConfig, + _: &mut Timeline +) -> Result, FatalError> { + let incr_comp_session_dir = cgcx.incr_comp_session_dir + .as_ref() + .unwrap(); + let mut object = None; + let mut bytecode = None; + let mut bytecode_compressed = None; + for (kind, saved_file) in &module.source.saved_files { + let obj_out = match kind { + WorkProductFileKind::Object => { + let path = cgcx.output_filenames.temp_path(OutputType::Object, + Some(&module.name)); + object = Some(path.clone()); + path + } + WorkProductFileKind::Bytecode => { + let path = cgcx.output_filenames.temp_path(OutputType::Bitcode, + Some(&module.name)); + bytecode = Some(path.clone()); + path + } + WorkProductFileKind::BytecodeCompressed => { + let path = cgcx.output_filenames.temp_path(OutputType::Bitcode, + Some(&module.name)) + .with_extension(RLIB_BYTECODE_EXTENSION); + bytecode_compressed = Some(path.clone()); + path + } + }; + let source_file = in_incr_comp_dir(&incr_comp_session_dir, + &saved_file); + debug!("copying pre-existing module `{}` from {:?} to {}", + module.name, + source_file, + obj_out.display()); + if let Err(err) = link_or_copy(&source_file, &obj_out) { + let diag_handler = cgcx.create_diag_handler(); + diag_handler.err(&format!("unable to copy {} to {}: {}", + source_file.display(), + obj_out.display(), + err)); + } + } + + assert_eq!(object.is_some(), module_config.emit_obj); + assert_eq!(bytecode.is_some(), module_config.emit_bc); + assert_eq!(bytecode_compressed.is_some(), module_config.emit_bc_compressed); + + Ok(WorkItemResult::Compiled(CompiledModule { + name: module.name, + kind: ModuleKind::Regular, + object, + bytecode, + bytecode_compressed, + })) +} + +fn execute_lto_work_item( + cgcx: &CodegenContext, + mut module: lto::LtoModuleCodegen, + module_config: &ModuleConfig, + timeline: &mut Timeline +) 
-> Result, FatalError> { + let diag_handler = cgcx.create_diag_handler(); + + unsafe { + let module = module.optimize(cgcx, timeline)?; + let module = B::codegen(cgcx, &diag_handler, module, module_config, timeline)?; + Ok(WorkItemResult::Compiled(module)) + } +} + +pub enum Message { + Token(io::Result), + NeedsLTO { + result: ModuleCodegen, + worker_id: usize, + }, + Done { + result: Result, + worker_id: usize, + }, + CodegenDone { + llvm_work_item: WorkItem, + cost: u64, + }, + AddImportOnlyModule { + module_data: SerializedModule, + work_product: WorkProduct, + }, + CodegenComplete, + CodegenItem, + CodegenAborted, +} + +struct Diagnostic { + msg: String, + code: Option, + lvl: Level, +} + +#[derive(PartialEq, Clone, Copy, Debug)] +enum MainThreadWorkerState { + Idle, + Codegenning, + LLVMing, +} + +fn start_executing_work( + backend: B, + tcx: TyCtxt, + crate_info: &CrateInfo, + shared_emitter: SharedEmitter, + codegen_worker_send: Sender>, + coordinator_receive: Receiver>, + total_cgus: usize, + jobserver: Client, + time_graph: Option, + modules_config: Arc, + metadata_config: Arc, + allocator_config: Arc +) -> thread::JoinHandle> { + let coordinator_send = tcx.tx_to_llvm_workers.lock().clone(); + let sess = tcx.sess; + + // Compute the set of symbols we need to retain when doing LTO (if we need to) + let exported_symbols = { + let mut exported_symbols = FxHashMap::default(); + + let copy_symbols = |cnum| { + let symbols = tcx.exported_symbols(cnum) + .iter() + .map(|&(s, lvl)| (s.symbol_name(tcx).to_string(), lvl)) + .collect(); + Arc::new(symbols) + }; + + match sess.lto() { + Lto::No => None, + Lto::ThinLocal => { + exported_symbols.insert(LOCAL_CRATE, copy_symbols(LOCAL_CRATE)); + Some(Arc::new(exported_symbols)) + } + Lto::Fat | Lto::Thin => { + exported_symbols.insert(LOCAL_CRATE, copy_symbols(LOCAL_CRATE)); + for &cnum in tcx.crates().iter() { + exported_symbols.insert(cnum, copy_symbols(cnum)); + } + Some(Arc::new(exported_symbols)) + } + } + }; + + // First up, convert our jobserver into a helper thread so we can use normal + // mpsc channels to manage our messages and such. + // After we've requested tokens then we'll, when we can, + // get tokens on `coordinator_receive` which will + // get managed in the main loop below. 
+ let coordinator_send2 = coordinator_send.clone(); + let helper = jobserver.into_helper_thread(move |token| { + drop(coordinator_send2.send(Box::new(Message::Token::(token)))); + }).expect("failed to spawn helper thread"); + + let mut each_linked_rlib_for_lto = Vec::new(); + drop(link::each_linked_rlib(sess, crate_info, &mut |cnum, path| { + if link::ignored_for_lto(sess, crate_info, cnum) { + return + } + each_linked_rlib_for_lto.push((cnum, path.to_path_buf())); + })); + + let assembler_cmd = if modules_config.no_integrated_as { + // HACK: currently we use linker (gcc) as our assembler + let (linker, flavor) = link::linker_and_flavor(sess); + + let (name, mut cmd) = get_linker(sess, &linker, flavor); + cmd.args(&sess.target.target.options.asm_args); + Some(Arc::new(AssemblerCommand { + name, + cmd, + })) + } else { + None + }; + + let cgcx = CodegenContext:: { + backend: backend.clone(), + crate_types: sess.crate_types.borrow().clone(), + each_linked_rlib_for_lto, + lto: sess.lto(), + no_landing_pads: sess.no_landing_pads(), + fewer_names: sess.fewer_names(), + save_temps: sess.opts.cg.save_temps, + opts: Arc::new(sess.opts.clone()), + time_passes: sess.time_passes(), + exported_symbols, + plugin_passes: sess.plugin_llvm_passes.borrow().clone(), + remark: sess.opts.cg.remark.clone(), + worker: 0, + incr_comp_session_dir: sess.incr_comp_session_dir_opt().map(|r| r.clone()), + cgu_reuse_tracker: sess.cgu_reuse_tracker.clone(), + coordinator_send, + diag_emitter: shared_emitter.clone(), + time_graph, + output_filenames: tcx.output_filenames(LOCAL_CRATE), + regular_module_config: modules_config, + metadata_module_config: metadata_config, + allocator_module_config: allocator_config, + tm_factory: TargetMachineFactory(backend.target_machine_factory(tcx.sess, false)), + total_cgus, + msvc_imps_needed: msvc_imps_needed(tcx), + target_pointer_width: tcx.sess.target.target.target_pointer_width.clone(), + debuginfo: tcx.sess.opts.debuginfo, + assembler_cmd, + }; + + // This is the "main loop" of parallel work happening for parallel codegen. + // It's here that we manage parallelism, schedule work, and work with + // messages coming from clients. + // + // There are a few environmental pre-conditions that shape how the system + // is set up: + // + // - Error reporting only can happen on the main thread because that's the + // only place where we have access to the compiler `Session`. + // - LLVM work can be done on any thread. + // - Codegen can only happen on the main thread. + // - Each thread doing substantial work most be in possession of a `Token` + // from the `Jobserver`. + // - The compiler process always holds one `Token`. Any additional `Tokens` + // have to be requested from the `Jobserver`. + // + // Error Reporting + // =============== + // The error reporting restriction is handled separately from the rest: We + // set up a `SharedEmitter` the holds an open channel to the main thread. + // When an error occurs on any thread, the shared emitter will send the + // error message to the receiver main thread (`SharedEmitterMain`). The + // main thread will periodically query this error message queue and emit + // any error messages it has received. It might even abort compilation if + // has received a fatal error. In this case we rely on all other threads + // being torn down automatically with the main thread. 
+ // Since the main thread will often be busy doing codegen work, error + // reporting will be somewhat delayed, since the message queue can only be + // checked in between to work packages. + // + // Work Processing Infrastructure + // ============================== + // The work processing infrastructure knows three major actors: + // + // - the coordinator thread, + // - the main thread, and + // - LLVM worker threads + // + // The coordinator thread is running a message loop. It instructs the main + // thread about what work to do when, and it will spawn off LLVM worker + // threads as open LLVM WorkItems become available. + // + // The job of the main thread is to codegen CGUs into LLVM work package + // (since the main thread is the only thread that can do this). The main + // thread will block until it receives a message from the coordinator, upon + // which it will codegen one CGU, send it to the coordinator and block + // again. This way the coordinator can control what the main thread is + // doing. + // + // The coordinator keeps a queue of LLVM WorkItems, and when a `Token` is + // available, it will spawn off a new LLVM worker thread and let it process + // that a WorkItem. When a LLVM worker thread is done with its WorkItem, + // it will just shut down, which also frees all resources associated with + // the given LLVM module, and sends a message to the coordinator that the + // has been completed. + // + // Work Scheduling + // =============== + // The scheduler's goal is to minimize the time it takes to complete all + // work there is, however, we also want to keep memory consumption low + // if possible. These two goals are at odds with each other: If memory + // consumption were not an issue, we could just let the main thread produce + // LLVM WorkItems at full speed, assuring maximal utilization of + // Tokens/LLVM worker threads. However, since codegen usual is faster + // than LLVM processing, the queue of LLVM WorkItems would fill up and each + // WorkItem potentially holds on to a substantial amount of memory. + // + // So the actual goal is to always produce just enough LLVM WorkItems as + // not to starve our LLVM worker threads. That means, once we have enough + // WorkItems in our queue, we can block the main thread, so it does not + // produce more until we need them. + // + // Doing LLVM Work on the Main Thread + // ---------------------------------- + // Since the main thread owns the compiler processes implicit `Token`, it is + // wasteful to keep it blocked without doing any work. Therefore, what we do + // in this case is: We spawn off an additional LLVM worker thread that helps + // reduce the queue. The work it is doing corresponds to the implicit + // `Token`. The coordinator will mark the main thread as being busy with + // LLVM work. (The actual work happens on another OS thread but we just care + // about `Tokens`, not actual threads). + // + // When any LLVM worker thread finishes while the main thread is marked as + // "busy with LLVM work", we can do a little switcheroo: We give the Token + // of the just finished thread to the LLVM worker thread that is working on + // behalf of the main thread's implicit Token, thus freeing up the main + // thread again. The coordinator can then again decide what the main thread + // should do. This allows the coordinator to make decisions at more points + // in time. 
+ // + // Striking a Balance between Throughput and Memory Consumption + // ------------------------------------------------------------ + // Since our two goals, (1) use as many Tokens as possible and (2) keep + // memory consumption as low as possible, are in conflict with each other, + // we have to find a trade off between them. Right now, the goal is to keep + // all workers busy, which means that no worker should find the queue empty + // when it is ready to start. + // How do we do achieve this? Good question :) We actually never know how + // many `Tokens` are potentially available so it's hard to say how much to + // fill up the queue before switching the main thread to LLVM work. Also we + // currently don't have a means to estimate how long a running LLVM worker + // will still be busy with it's current WorkItem. However, we know the + // maximal count of available Tokens that makes sense (=the number of CPU + // cores), so we can take a conservative guess. The heuristic we use here + // is implemented in the `queue_full_enough()` function. + // + // Some Background on Jobservers + // ----------------------------- + // It's worth also touching on the management of parallelism here. We don't + // want to just spawn a thread per work item because while that's optimal + // parallelism it may overload a system with too many threads or violate our + // configuration for the maximum amount of cpu to use for this process. To + // manage this we use the `jobserver` crate. + // + // Job servers are an artifact of GNU make and are used to manage + // parallelism between processes. A jobserver is a glorified IPC semaphore + // basically. Whenever we want to run some work we acquire the semaphore, + // and whenever we're done with that work we release the semaphore. In this + // manner we can ensure that the maximum number of parallel workers is + // capped at any one point in time. + // + // LTO and the coordinator thread + // ------------------------------ + // + // The final job the coordinator thread is responsible for is managing LTO + // and how that works. When LTO is requested what we'll to is collect all + // optimized LLVM modules into a local vector on the coordinator. Once all + // modules have been codegened and optimized we hand this to the `lto` + // module for further optimization. The `lto` module will return back a list + // of more modules to work on, which the coordinator will continue to spawn + // work for. + // + // Each LLVM module is automatically sent back to the coordinator for LTO if + // necessary. There's already optimizations in place to avoid sending work + // back to the coordinator if LTO isn't requested. + return thread::spawn(move || { + // We pretend to be within the top-level LLVM time-passes task here: + set_time_depth(1); + + let max_workers = ::num_cpus::get(); + let mut worker_id_counter = 0; + let mut free_worker_ids = Vec::new(); + let mut get_worker_id = |free_worker_ids: &mut Vec| { + if let Some(id) = free_worker_ids.pop() { + id + } else { + let id = worker_id_counter; + worker_id_counter += 1; + id + } + }; + + // This is where we collect codegen units that have gone all the way + // through codegen and LLVM. 
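The token flow sketched in the jobserver comment above can be reproduced with the `jobserver` crate on its own. The snippet below is only an illustrative sketch, not part of the patch: it assumes a standalone binary with `jobserver` as a dependency, and the channel and variable names are invented, but `Client::new`, `into_helper_thread`, and `request_token` are the same crate APIs the coordinator uses.

```
use std::sync::mpsc::channel;

fn main() -> std::io::Result<()> {
    // A private jobserver with two tokens; in rustc the client is normally
    // inherited from cargo/make rather than created fresh.
    let client = jobserver::Client::new(2)?;
    let (tx, rx) = channel();

    // The helper thread invokes this closure once per token it acquires,
    // mirroring how acquired tokens become `Message::Token` above.
    let helper = client.into_helper_thread(move |token| {
        let _ = tx.send(token);
    })?;

    // Ask for a token and block until the helper thread delivers it.
    helper.request_token();
    let acquired = rx.recv().expect("helper thread hung up")?;
    println!("got a token; it is released again when `acquired` is dropped");
    drop(acquired);
    Ok(())
}
```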
+ let mut compiled_modules = vec![]; + let mut compiled_metadata_module = None; + let mut compiled_allocator_module = None; + let mut needs_lto = Vec::new(); + let mut lto_import_only_modules = Vec::new(); + let mut started_lto = false; + let mut codegen_aborted = false; + + // This flag tracks whether all items have gone through codegens + let mut codegen_done = false; + + // This is the queue of LLVM work items that still need processing. + let mut work_items = Vec::<(WorkItem, u64)>::new(); + + // This are the Jobserver Tokens we currently hold. Does not include + // the implicit Token the compiler process owns no matter what. + let mut tokens = Vec::new(); + + let mut main_thread_worker_state = MainThreadWorkerState::Idle; + let mut running = 0; + + let mut llvm_start_time = None; + + // Run the message loop while there's still anything that needs message + // processing. Note that as soon as codegen is aborted we simply want to + // wait for all existing work to finish, so many of the conditions here + // only apply if codegen hasn't been aborted as they represent pending + // work to be done. + while !codegen_done || + running > 0 || + (!codegen_aborted && ( + work_items.len() > 0 || + needs_lto.len() > 0 || + lto_import_only_modules.len() > 0 || + main_thread_worker_state != MainThreadWorkerState::Idle + )) + { + + // While there are still CGUs to be codegened, the coordinator has + // to decide how to utilize the compiler processes implicit Token: + // For codegenning more CGU or for running them through LLVM. + if !codegen_done { + if main_thread_worker_state == MainThreadWorkerState::Idle { + if !queue_full_enough(work_items.len(), running, max_workers) { + // The queue is not full enough, codegen more items: + if let Err(_) = codegen_worker_send.send(Message::CodegenItem) { + panic!("Could not send Message::CodegenItem to main thread") + } + main_thread_worker_state = MainThreadWorkerState::Codegenning; + } else { + // The queue is full enough to not let the worker + // threads starve. Use the implicit Token to do some + // LLVM work too. + let (item, _) = work_items.pop() + .expect("queue empty - queue_full_enough() broken?"); + let cgcx = CodegenContext { + worker: get_worker_id(&mut free_worker_ids), + .. cgcx.clone() + }; + maybe_start_llvm_timer(cgcx.config(item.module_kind()), + &mut llvm_start_time); + main_thread_worker_state = MainThreadWorkerState::LLVMing; + spawn_work(cgcx, item); + } + } + } else if codegen_aborted { + // don't queue up any more work if codegen was aborted, we're + // just waiting for our existing children to finish + } else { + // If we've finished everything related to normal codegen + // then it must be the case that we've got some LTO work to do. 
+ // Perform the serial work here of figuring out what we're + // going to LTO and then push a bunch of work items onto our + // queue to do LTO + if work_items.len() == 0 && + running == 0 && + main_thread_worker_state == MainThreadWorkerState::Idle { + assert!(!started_lto); + assert!(needs_lto.len() + lto_import_only_modules.len() > 0); + started_lto = true; + let modules = mem::replace(&mut needs_lto, Vec::new()); + let import_only_modules = + mem::replace(&mut lto_import_only_modules, Vec::new()); + for (work, cost) in generate_lto_work(&cgcx, modules, import_only_modules) { + let insertion_index = work_items + .binary_search_by_key(&cost, |&(_, cost)| cost) + .unwrap_or_else(|e| e); + work_items.insert(insertion_index, (work, cost)); + if !cgcx.opts.debugging_opts.no_parallel_llvm { + helper.request_token(); + } + } + } + + // In this branch, we know that everything has been codegened, + // so it's just a matter of determining whether the implicit + // Token is free to use for LLVM work. + match main_thread_worker_state { + MainThreadWorkerState::Idle => { + if let Some((item, _)) = work_items.pop() { + let cgcx = CodegenContext { + worker: get_worker_id(&mut free_worker_ids), + .. cgcx.clone() + }; + maybe_start_llvm_timer(cgcx.config(item.module_kind()), + &mut llvm_start_time); + main_thread_worker_state = MainThreadWorkerState::LLVMing; + spawn_work(cgcx, item); + } else { + // There is no unstarted work, so let the main thread + // take over for a running worker. Otherwise the + // implicit token would just go to waste. + // We reduce the `running` counter by one. The + // `tokens.truncate()` below will take care of + // giving the Token back. + debug_assert!(running > 0); + running -= 1; + main_thread_worker_state = MainThreadWorkerState::LLVMing; + } + } + MainThreadWorkerState::Codegenning => { + bug!("codegen worker should not be codegenning after \ + codegen was already completed") + } + MainThreadWorkerState::LLVMing => { + // Already making good use of that token + } + } + } + + // Spin up what work we can, only doing this while we've got available + // parallelism slots and work left to spawn. + while !codegen_aborted && work_items.len() > 0 && running < tokens.len() { + let (item, _) = work_items.pop().unwrap(); + + maybe_start_llvm_timer(cgcx.config(item.module_kind()), + &mut llvm_start_time); + + let cgcx = CodegenContext { + worker: get_worker_id(&mut free_worker_ids), + .. cgcx.clone() + }; + + spawn_work(cgcx, item); + running += 1; + } + + // Relinquish accidentally acquired extra tokens + tokens.truncate(running); + + let msg = coordinator_receive.recv().unwrap(); + match *msg.downcast::>().ok().unwrap() { + // Save the token locally and the next turn of the loop will use + // this to spawn a new unit of work, or it may get dropped + // immediately if we have no more work to spawn. + Message::Token(token) => { + match token { + Ok(token) => { + tokens.push(token); + + if main_thread_worker_state == MainThreadWorkerState::LLVMing { + // If the main thread token is used for LLVM work + // at the moment, we turn that thread into a regular + // LLVM worker thread, so the main thread is free + // to react to codegen demand. 
+ main_thread_worker_state = MainThreadWorkerState::Idle; + running += 1; + } + } + Err(e) => { + let msg = &format!("failed to acquire jobserver token: {}", e); + shared_emitter.fatal(msg); + // Exit the coordinator thread + panic!("{}", msg) + } + } + } + + Message::CodegenDone { llvm_work_item, cost } => { + // We keep the queue sorted by estimated processing cost, + // so that more expensive items are processed earlier. This + // is good for throughput as it gives the main thread more + // time to fill up the queue and it avoids scheduling + // expensive items to the end. + // Note, however, that this is not ideal for memory + // consumption, as LLVM module sizes are not evenly + // distributed. + let insertion_index = + work_items.binary_search_by_key(&cost, |&(_, cost)| cost); + let insertion_index = match insertion_index { + Ok(idx) | Err(idx) => idx + }; + work_items.insert(insertion_index, (llvm_work_item, cost)); + + if !cgcx.opts.debugging_opts.no_parallel_llvm { + helper.request_token(); + } + assert!(!codegen_aborted); + assert_eq!(main_thread_worker_state, + MainThreadWorkerState::Codegenning); + main_thread_worker_state = MainThreadWorkerState::Idle; + } + + Message::CodegenComplete => { + codegen_done = true; + assert!(!codegen_aborted); + assert_eq!(main_thread_worker_state, + MainThreadWorkerState::Codegenning); + main_thread_worker_state = MainThreadWorkerState::Idle; + } + + // If codegen is aborted that means translation was aborted due + // to some normal-ish compiler error. In this situation we want + // to exit as soon as possible, but we want to make sure all + // existing work has finished. Flag codegen as being done, and + // then conditions above will ensure no more work is spawned but + // we'll keep executing this loop until `running` hits 0. + Message::CodegenAborted => { + assert!(!codegen_aborted); + codegen_done = true; + codegen_aborted = true; + assert_eq!(main_thread_worker_state, + MainThreadWorkerState::Codegenning); + } + + // If a thread exits successfully then we drop a token associated + // with that worker and update our `running` count. We may later + // re-acquire a token to continue running more work. We may also not + // actually drop a token here if the worker was running with an + // "ephemeral token" + // + // Note that if the thread failed that means it panicked, so we + // abort immediately. 
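The cost-sorted insertion used for `Message::CodegenDone` above keeps the queue ordered by estimated cost, so `pop()` always yields the most expensive remaining item. A self-contained restatement of that idea, with invented item names:

```
fn insert_by_cost<T>(queue: &mut Vec<(T, u64)>, item: T, cost: u64) {
    // `Err(idx)` from the binary search is the insertion point that keeps
    // the vector sorted ascending by cost.
    let idx = queue
        .binary_search_by_key(&cost, |&(_, c)| c)
        .unwrap_or_else(|idx| idx);
    queue.insert(idx, (item, cost));
}

fn main() {
    let mut queue: Vec<(&str, u64)> = Vec::new();
    for (name, cost) in [("tiny cgu", 10), ("huge cgu", 900), ("medium cgu", 120)] {
        insert_by_cost(&mut queue, name, cost);
    }
    // The most expensive work item is popped, and therefore started, first.
    assert_eq!(queue.pop(), Some(("huge cgu", 900)));
    println!("{:?}", queue);
}
```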
+ Message::Done { result: Ok(compiled_module), worker_id } => { + if main_thread_worker_state == MainThreadWorkerState::LLVMing { + main_thread_worker_state = MainThreadWorkerState::Idle; + } else { + running -= 1; + } + + free_worker_ids.push(worker_id); + + match compiled_module.kind { + ModuleKind::Regular => { + compiled_modules.push(compiled_module); + } + ModuleKind::Metadata => { + assert!(compiled_metadata_module.is_none()); + compiled_metadata_module = Some(compiled_module); + } + ModuleKind::Allocator => { + assert!(compiled_allocator_module.is_none()); + compiled_allocator_module = Some(compiled_module); + } + } + } + Message::NeedsLTO { result, worker_id } => { + assert!(!started_lto); + if main_thread_worker_state == MainThreadWorkerState::LLVMing { + main_thread_worker_state = MainThreadWorkerState::Idle; + } else { + running -= 1; + } + free_worker_ids.push(worker_id); + needs_lto.push(result); + } + Message::AddImportOnlyModule { module_data, work_product } => { + assert!(!started_lto); + assert!(!codegen_done); + assert_eq!(main_thread_worker_state, + MainThreadWorkerState::Codegenning); + lto_import_only_modules.push((module_data, work_product)); + main_thread_worker_state = MainThreadWorkerState::Idle; + } + Message::Done { result: Err(()), worker_id: _ } => { + bug!("worker thread panicked"); + } + Message::CodegenItem => { + bug!("the coordinator should not receive codegen requests") + } + } + } + + if let Some(llvm_start_time) = llvm_start_time { + let total_llvm_time = Instant::now().duration_since(llvm_start_time); + // This is the top-level timing for all of LLVM, set the time-depth + // to zero. + set_time_depth(0); + print_time_passes_entry(cgcx.time_passes, + "LLVM passes", + total_llvm_time); + } + + // Regardless of what order these modules completed in, report them to + // the backend in the same order every time to ensure that we're handing + // out deterministic results. + compiled_modules.sort_by(|a, b| a.name.cmp(&b.name)); + + let compiled_metadata_module = compiled_metadata_module + .expect("Metadata module not compiled?"); + + Ok(CompiledModules { + modules: compiled_modules, + metadata_module: compiled_metadata_module, + allocator_module: compiled_allocator_module, + }) + }); + + // A heuristic that determines if we have enough LLVM WorkItems in the + // queue so that the main thread can do LLVM work instead of codegen + fn queue_full_enough(items_in_queue: usize, + workers_running: usize, + max_workers: usize) -> bool { + // Tune me, plz. + items_in_queue > 0 && + items_in_queue >= max_workers.saturating_sub(workers_running / 2) + } + + fn maybe_start_llvm_timer(config: &ModuleConfig, + llvm_start_time: &mut Option) { + // We keep track of the -Ztime-passes output manually, + // since the closure-based interface does not fit well here. 
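To get a feel for the `queue_full_enough` heuristic defined above, this self-contained snippet (the queue and worker counts are made up, assuming 8 available cores) prints when the main thread would switch from codegen to LLVM work:

```
fn queue_full_enough(items_in_queue: usize, workers_running: usize, max_workers: usize) -> bool {
    items_in_queue > 0 &&
        items_in_queue >= max_workers.saturating_sub(workers_running / 2)
}

fn main() {
    let max_workers = 8;
    for (queued, running) in [(0, 0), (4, 0), (8, 0), (5, 6), (7, 2)] {
        println!("queued={:2} running={:2} -> main thread switches to LLVM work: {}",
                 queued, running, queue_full_enough(queued, running, max_workers));
    }
}
```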
+ if config.time_passes { + if llvm_start_time.is_none() { + *llvm_start_time = Some(Instant::now()); + } + } + } +} + +pub const CODEGEN_WORKER_ID: usize = ::std::usize::MAX; +pub const CODEGEN_WORKER_TIMELINE: time_graph::TimelineId = + time_graph::TimelineId(CODEGEN_WORKER_ID); +pub const CODEGEN_WORK_PACKAGE_KIND: time_graph::WorkPackageKind = + time_graph::WorkPackageKind(&["#DE9597", "#FED1D3", "#FDC5C7", "#B46668", "#88494B"]); +const LLVM_WORK_PACKAGE_KIND: time_graph::WorkPackageKind = + time_graph::WorkPackageKind(&["#7DB67A", "#C6EEC4", "#ACDAAA", "#579354", "#3E6F3C"]); + +fn spawn_work( + cgcx: CodegenContext, + work: WorkItem +) { + let depth = time_depth(); + + thread::spawn(move || { + set_time_depth(depth); + + // Set up a destructor which will fire off a message that we're done as + // we exit. + struct Bomb { + coordinator_send: Sender>, + result: Option>, + worker_id: usize, + } + impl Drop for Bomb { + fn drop(&mut self) { + let worker_id = self.worker_id; + let msg = match self.result.take() { + Some(WorkItemResult::Compiled(m)) => { + Message::Done:: { result: Ok(m), worker_id } + } + Some(WorkItemResult::NeedsLTO(m)) => { + Message::NeedsLTO:: { result: m, worker_id } + } + None => Message::Done:: { result: Err(()), worker_id } + }; + drop(self.coordinator_send.send(Box::new(msg))); + } + } + + let mut bomb = Bomb:: { + coordinator_send: cgcx.coordinator_send.clone(), + result: None, + worker_id: cgcx.worker, + }; + + // Execute the work itself, and if it finishes successfully then flag + // ourselves as a success as well. + // + // Note that we ignore any `FatalError` coming out of `execute_work_item`, + // as a diagnostic was already sent off to the main thread - just + // surface that there was an error in this worker. + bomb.result = { + let timeline = cgcx.time_graph.as_ref().map(|tg| { + tg.start(time_graph::TimelineId(cgcx.worker), + LLVM_WORK_PACKAGE_KIND, + &work.name()) + }); + let mut timeline = timeline.unwrap_or(Timeline::noop()); + execute_work_item(&cgcx, work, &mut timeline).ok() + }; + }); +} + +pub fn run_assembler( + cgcx: &CodegenContext, + handler: &Handler, + assembly: &Path, + object: &Path +) { + let assembler = cgcx.assembler_cmd + .as_ref() + .expect("cgcx.assembler_cmd is missing?"); + + let pname = &assembler.name; + let mut cmd = assembler.cmd.clone(); + cmd.arg("-c").arg("-o").arg(object).arg(assembly); + debug!("{:?}", cmd); + + match cmd.output() { + Ok(prog) => { + if !prog.status.success() { + let mut note = prog.stderr.clone(); + note.extend_from_slice(&prog.stdout); + + handler.struct_err(&format!("linking with `{}` failed: {}", + pname.display(), + prog.status)) + .note(&format!("{:?}", &cmd)) + .note(str::from_utf8(¬e[..]).unwrap()) + .emit(); + handler.abort_if_errors(); + } + }, + Err(e) => { + handler.err(&format!("could not exec the linker `{}`: {}", pname.display(), e)); + handler.abort_if_errors(); + } + } +} + + +enum SharedEmitterMessage { + Diagnostic(Diagnostic), + InlineAsmError(u32, String), + AbortIfErrors, + Fatal(String), +} + +#[derive(Clone)] +pub struct SharedEmitter { + sender: Sender, +} + +pub struct SharedEmitterMain { + receiver: Receiver, +} + +impl SharedEmitter { + pub fn new() -> (SharedEmitter, SharedEmitterMain) { + let (sender, receiver) = channel(); + + (SharedEmitter { sender }, SharedEmitterMain { receiver }) + } + + pub fn inline_asm_error(&self, cookie: u32, msg: String) { + drop(self.sender.send(SharedEmitterMessage::InlineAsmError(cookie, msg))); + } + + pub fn fatal(&self, msg: &str) { + 
drop(self.sender.send(SharedEmitterMessage::Fatal(msg.to_string()))); + } +} + +impl Emitter for SharedEmitter { + fn emit(&mut self, db: &DiagnosticBuilder) { + drop(self.sender.send(SharedEmitterMessage::Diagnostic(Diagnostic { + msg: db.message(), + code: db.code.clone(), + lvl: db.level, + }))); + for child in &db.children { + drop(self.sender.send(SharedEmitterMessage::Diagnostic(Diagnostic { + msg: child.message(), + code: None, + lvl: child.level, + }))); + } + drop(self.sender.send(SharedEmitterMessage::AbortIfErrors)); + } +} + +impl SharedEmitterMain { + pub fn check(&self, sess: &Session, blocking: bool) { + loop { + let message = if blocking { + match self.receiver.recv() { + Ok(message) => Ok(message), + Err(_) => Err(()), + } + } else { + match self.receiver.try_recv() { + Ok(message) => Ok(message), + Err(_) => Err(()), + } + }; + + match message { + Ok(SharedEmitterMessage::Diagnostic(diag)) => { + let handler = sess.diagnostic(); + match diag.code { + Some(ref code) => { + handler.emit_with_code(&MultiSpan::new(), + &diag.msg, + code.clone(), + diag.lvl); + } + None => { + handler.emit(&MultiSpan::new(), + &diag.msg, + diag.lvl); + } + } + } + Ok(SharedEmitterMessage::InlineAsmError(cookie, msg)) => { + match Mark::from_u32(cookie).expn_info() { + Some(ei) => sess.span_err(ei.call_site, &msg), + None => sess.err(&msg), + } + } + Ok(SharedEmitterMessage::AbortIfErrors) => { + sess.abort_if_errors(); + } + Ok(SharedEmitterMessage::Fatal(msg)) => { + sess.fatal(&msg); + } + Err(_) => { + break; + } + } + + } + } +} + +pub struct OngoingCodegen { + pub backend: B, + pub crate_name: Symbol, + pub crate_hash: Svh, + pub metadata: EncodedMetadata, + pub windows_subsystem: Option, + pub linker_info: LinkerInfo, + pub crate_info: CrateInfo, + pub time_graph: Option, + pub coordinator_send: Sender>, + pub codegen_worker_receive: Receiver>, + pub shared_emitter_main: SharedEmitterMain, + pub future: thread::JoinHandle>, + pub output_filenames: Arc, +} + +impl OngoingCodegen { + pub fn join( + self, + sess: &Session + ) -> (CodegenResults, FxHashMap) { + self.shared_emitter_main.check(sess, true); + let compiled_modules = match self.future.join() { + Ok(Ok(compiled_modules)) => compiled_modules, + Ok(Err(())) => { + sess.abort_if_errors(); + panic!("expected abort due to worker thread errors") + }, + Err(_) => { + bug!("panic during codegen/LLVM phase"); + } + }; + + sess.cgu_reuse_tracker.check_expected_reuse(sess); + + sess.abort_if_errors(); + + if let Some(time_graph) = self.time_graph { + time_graph.dump(&format!("{}-timings", self.crate_name)); + } + + let work_products = + copy_all_cgu_workproducts_to_incr_comp_cache_dir(sess, + &compiled_modules); + produce_final_output_artifacts(sess, + &compiled_modules, + &self.output_filenames); + + // FIXME: time_llvm_passes support - does this use a global context or + // something? 
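The `SharedEmitter` / `SharedEmitterMain` pair above is an instance of a more general pattern: worker threads push diagnostics onto a channel, and only the main thread, which owns the `Session`, ever emits them. A minimal sketch of that pattern; the `Diag` enum and function names below are invented for illustration, not taken from the compiler:

```
use std::sync::mpsc::{channel, Receiver, Sender, TryRecvError};
use std::thread;

enum Diag {
    Error(String),
    AbortIfErrors,
}

// Worker threads only ever push onto the channel; they never emit directly.
fn worker(tx: Sender<Diag>) {
    let _ = tx.send(Diag::Error("something went wrong in LLVM".to_string()));
    let _ = tx.send(Diag::AbortIfErrors);
}

// The main thread drains the queue in between work packages.
fn check(rx: &Receiver<Diag>, errors: &mut usize) {
    loop {
        match rx.try_recv() {
            Ok(Diag::Error(msg)) => {
                eprintln!("error: {}", msg);
                *errors += 1;
            }
            Ok(Diag::AbortIfErrors) if *errors > 0 => {
                eprintln!("aborting due to {} previous error(s)", *errors);
                break;
            }
            Ok(Diag::AbortIfErrors) => {}
            Err(TryRecvError::Empty) | Err(TryRecvError::Disconnected) => break,
        }
    }
}

fn main() {
    let (tx, rx) = channel();
    thread::spawn(move || worker(tx)).join().unwrap();
    let mut errors = 0;
    check(&rx, &mut errors);
}
```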
+ if sess.codegen_units() == 1 && sess.time_llvm_passes() { + self.backend.print_pass_timings() + } + + (CodegenResults { + crate_name: self.crate_name, + crate_hash: self.crate_hash, + metadata: self.metadata, + windows_subsystem: self.windows_subsystem, + linker_info: self.linker_info, + crate_info: self.crate_info, + + modules: compiled_modules.modules, + allocator_module: compiled_modules.allocator_module, + metadata_module: compiled_modules.metadata_module, + }, work_products) + } + + pub fn submit_pre_codegened_module_to_llvm(&self, + tcx: TyCtxt, + module: ModuleCodegen) { + self.wait_for_signal_to_codegen_item(); + self.check_for_errors(tcx.sess); + + // These are generally cheap and won't through off scheduling. + let cost = 0; + submit_codegened_module_to_llvm(&self.backend, tcx, module, cost); + } + + pub fn codegen_finished(&self, tcx: TyCtxt) { + self.wait_for_signal_to_codegen_item(); + self.check_for_errors(tcx.sess); + drop(self.coordinator_send.send(Box::new(Message::CodegenComplete::))); + } + + /// Consume this context indicating that codegen was entirely aborted, and + /// we need to exit as quickly as possible. + /// + /// This method blocks the current thread until all worker threads have + /// finished, and all worker threads should have exited or be real close to + /// exiting at this point. + pub fn codegen_aborted(self) { + // Signal to the coordinator it should spawn no more work and start + // shutdown. + drop(self.coordinator_send.send(Box::new(Message::CodegenAborted::))); + drop(self.future.join()); + } + + pub fn check_for_errors(&self, sess: &Session) { + self.shared_emitter_main.check(sess, false); + } + + pub fn wait_for_signal_to_codegen_item(&self) { + match self.codegen_worker_receive.recv() { + Ok(Message::CodegenItem) => { + // Nothing to do + } + Ok(_) => panic!("unexpected message"), + Err(_) => { + // One of the LLVM threads must have panicked, fall through so + // error handling can be reached. + } + } + } +} + +pub fn submit_codegened_module_to_llvm( + _backend: &B, + tcx: TyCtxt, + module: ModuleCodegen, + cost: u64 +) { + let llvm_work_item = WorkItem::Optimize(module); + drop(tcx.tx_to_llvm_workers.lock().send(Box::new(Message::CodegenDone:: { + llvm_work_item, + cost, + }))); +} + +pub fn submit_post_lto_module_to_llvm( + _backend: &B, + tcx: TyCtxt, + module: CachedModuleCodegen +) { + let llvm_work_item = WorkItem::CopyPostLtoArtifacts(module); + drop(tcx.tx_to_llvm_workers.lock().send(Box::new(Message::CodegenDone:: { + llvm_work_item, + cost: 0, + }))); +} + +pub fn submit_pre_lto_module_to_llvm( + _backend: &B, + tcx: TyCtxt, + module: CachedModuleCodegen +) { + let filename = pre_lto_bitcode_filename(&module.name); + let bc_path = in_incr_comp_dir_sess(tcx.sess, &filename); + let file = fs::File::open(&bc_path).unwrap_or_else(|e| { + panic!("failed to open bitcode file `{}`: {}", bc_path.display(), e) + }); + + let mmap = unsafe { + memmap::Mmap::map(&file).unwrap_or_else(|e| { + panic!("failed to mmap bitcode file `{}`: {}", bc_path.display(), e) + }) + }; + // Schedule the module to be loaded + drop(tcx.tx_to_llvm_workers.lock().send(Box::new(Message::AddImportOnlyModule:: { + module_data: SerializedModule::FromUncompressedFile(mmap), + work_product: module.source, + }))); +} + +pub fn pre_lto_bitcode_filename(module_name: &str) -> String { + format!("{}.{}", module_name, PRE_THIN_LTO_BC_EXT) +} + +fn msvc_imps_needed(tcx: TyCtxt) -> bool { + // This should never be true (because it's not supported). 
If it is true, + // something is wrong with commandline arg validation. + assert!(!(tcx.sess.opts.debugging_opts.cross_lang_lto.enabled() && + tcx.sess.target.target.options.is_like_msvc && + tcx.sess.opts.cg.prefer_dynamic)); + + tcx.sess.target.target.options.is_like_msvc && + tcx.sess.crate_types.borrow().iter().any(|ct| *ct == config::CrateType::Rlib) && + // ThinLTO can't handle this workaround in all cases, so we don't + // emit the `__imp_` symbols. Instead we make them unnecessary by disallowing + // dynamic linking when cross-language LTO is enabled. + !tcx.sess.opts.debugging_opts.cross_lang_lto.enabled() +} diff --git a/src/librustc_codegen_ssa/base.rs b/src/librustc_codegen_ssa/base.rs new file mode 100644 index 0000000000..266f78996b --- /dev/null +++ b/src/librustc_codegen_ssa/base.rs @@ -0,0 +1,977 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Codegen the completed AST to the LLVM IR. +//! +//! Some functions here, such as codegen_block and codegen_expr, return a value -- +//! the result of the codegen to LLVM -- while others, such as codegen_fn +//! and mono_item, are called only for the side effect of adding a +//! particular definition to the LLVM IR output we're producing. +//! +//! Hopefully useful general knowledge about codegen: +//! +//! * There's no way to find out the Ty type of a Value. Doing so +//! would be "trying to get the eggs out of an omelette" (credit: +//! pcwalton). You can, instead, find out its llvm::Type by calling val_ty, +//! but one llvm::Type corresponds to many `Ty`s; for instance, tup(int, int, +//! int) and rec(x=int, y=int, z=int) will have the same llvm::Type. 
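The point in the module comment above, that structurally identical tuple and record types lower to the same LLVM type, can at least be approximated from safe Rust by checking that their sizes and alignments agree. The `Rec` struct below is a hypothetical stand-in, and this only demonstrates layout agreement, not LLVM type identity:

```
use std::mem::{align_of, size_of};

#[repr(C)]
struct Rec {
    x: i32,
    y: i32,
    z: i32,
}

fn main() {
    // Both end up as "three i32s in a row" as far as layout is concerned.
    assert_eq!(size_of::<(i32, i32, i32)>(), size_of::<Rec>());
    assert_eq!(align_of::<(i32, i32, i32)>(), align_of::<Rec>());
    println!("tuple and record share size {} and alignment {}",
             size_of::<Rec>(), align_of::<Rec>());
}
```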
+ +use {ModuleCodegen, ModuleKind, CachedModuleCodegen}; + +use rustc::dep_graph::cgu_reuse_tracker::CguReuse; +use rustc::hir::def_id::{CrateNum, DefId, LOCAL_CRATE}; +use rustc::middle::lang_items::StartFnLangItem; +use rustc::middle::weak_lang_items; +use rustc::mir::mono::{Stats, CodegenUnitNameBuilder}; +use rustc::ty::{self, Ty, TyCtxt}; +use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, VariantIdx, HasTyCtxt}; +use rustc::ty::query::Providers; +use rustc::middle::cstore::{self, LinkagePreference}; +use rustc::util::common::{time, print_time_passes_entry}; +use rustc::util::profiling::ProfileCategory; +use rustc::session::config::{self, EntryFnType, Lto}; +use rustc::session::Session; +use mir::place::PlaceRef; +use back::write::{OngoingCodegen, start_async_codegen, submit_pre_lto_module_to_llvm, + submit_post_lto_module_to_llvm}; +use {MemFlags, CrateInfo}; +use callee; +use rustc_mir::monomorphize::item::DefPathBasedNames; +use common::{RealPredicate, TypeKind, IntPredicate}; +use meth; +use mir; +use rustc::util::time_graph; +use rustc_mir::monomorphize::Instance; +use rustc_mir::monomorphize::partitioning::{CodegenUnit, CodegenUnitExt}; +use mono_item::MonoItem; +use rustc::util::nodemap::FxHashMap; +use rustc_data_structures::indexed_vec::Idx; +use rustc_data_structures::sync::Lrc; +use rustc_codegen_utils::{symbol_names_test, check_for_rustc_errors_attr}; +use rustc::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA}; + +use traits::*; + +use std::any::Any; +use std::cmp; +use std::ops::{Deref, DerefMut}; +use std::time::{Instant, Duration}; +use std::sync::mpsc; +use syntax_pos::Span; +use syntax::attr; +use rustc::hir; + +use mir::operand::OperandValue; + +use std::marker::PhantomData; + +pub struct StatRecorder<'a, 'tcx, Cx: 'a + CodegenMethods<'tcx>> { + cx: &'a Cx, + name: Option, + istart: usize, + _marker: PhantomData<&'tcx ()>, +} + +impl<'a, 'tcx, Cx: CodegenMethods<'tcx>> StatRecorder<'a, 'tcx, Cx> { + pub fn new(cx: &'a Cx, name: String) -> Self { + let istart = cx.stats().borrow().n_llvm_insns; + StatRecorder { + cx, + name: Some(name), + istart, + _marker: PhantomData, + } + } +} + +impl<'a, 'tcx, Cx: CodegenMethods<'tcx>> Drop for StatRecorder<'a, 'tcx, Cx> { + fn drop(&mut self) { + if self.cx.sess().codegen_stats() { + let mut stats = self.cx.stats().borrow_mut(); + let iend = stats.n_llvm_insns; + stats.fn_stats.push((self.name.take().unwrap(), iend - self.istart)); + stats.n_fns += 1; + // Reset LLVM insn count to avoid compound costs. 
+ stats.n_llvm_insns = self.istart; + } + } +} + +pub fn bin_op_to_icmp_predicate(op: hir::BinOpKind, + signed: bool) + -> IntPredicate { + match op { + hir::BinOpKind::Eq => IntPredicate::IntEQ, + hir::BinOpKind::Ne => IntPredicate::IntNE, + hir::BinOpKind::Lt => if signed { IntPredicate::IntSLT } else { IntPredicate::IntULT }, + hir::BinOpKind::Le => if signed { IntPredicate::IntSLE } else { IntPredicate::IntULE }, + hir::BinOpKind::Gt => if signed { IntPredicate::IntSGT } else { IntPredicate::IntUGT }, + hir::BinOpKind::Ge => if signed { IntPredicate::IntSGE } else { IntPredicate::IntUGE }, + op => { + bug!("comparison_op_to_icmp_predicate: expected comparison operator, \ + found {:?}", + op) + } + } +} + +pub fn bin_op_to_fcmp_predicate(op: hir::BinOpKind) -> RealPredicate { + match op { + hir::BinOpKind::Eq => RealPredicate::RealOEQ, + hir::BinOpKind::Ne => RealPredicate::RealUNE, + hir::BinOpKind::Lt => RealPredicate::RealOLT, + hir::BinOpKind::Le => RealPredicate::RealOLE, + hir::BinOpKind::Gt => RealPredicate::RealOGT, + hir::BinOpKind::Ge => RealPredicate::RealOGE, + op => { + bug!("comparison_op_to_fcmp_predicate: expected comparison operator, \ + found {:?}", + op); + } + } +} + +pub fn compare_simd_types<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( + bx: &mut Bx, + lhs: Bx::Value, + rhs: Bx::Value, + t: Ty<'tcx>, + ret_ty: Bx::Type, + op: hir::BinOpKind +) -> Bx::Value { + let signed = match t.sty { + ty::Float(_) => { + let cmp = bin_op_to_fcmp_predicate(op); + let cmp = bx.fcmp(cmp, lhs, rhs); + return bx.sext(cmp, ret_ty); + }, + ty::Uint(_) => false, + ty::Int(_) => true, + _ => bug!("compare_simd_types: invalid SIMD type"), + }; + + let cmp = bin_op_to_icmp_predicate(op, signed); + let cmp = bx.icmp(cmp, lhs, rhs); + // LLVM outputs an `< size x i1 >`, so we need to perform a sign extension + // to get the correctly sized type. This will compile to a single instruction + // once the IR is converted to assembly if the SIMD instruction is supported + // by the target architecture. + bx.sext(cmp, ret_ty) +} + +/// Retrieve the information we are losing (making dynamic) in an unsizing +/// adjustment. +/// +/// The `old_info` argument is a bit funny. It is intended for use +/// in an upcast, where the new vtable for an object will be derived +/// from the old one. +pub fn unsized_info<'tcx, Cx: CodegenMethods<'tcx>>( + cx: &Cx, + source: Ty<'tcx>, + target: Ty<'tcx>, + old_info: Option, +) -> Cx::Value { + let (source, target) = cx.tcx().struct_lockstep_tails(source, target); + match (&source.sty, &target.sty) { + (&ty::Array(_, len), &ty::Slice(_)) => { + cx.const_usize(len.unwrap_usize(cx.tcx())) + } + (&ty::Dynamic(..), &ty::Dynamic(..)) => { + // For now, upcasts are limited to changes in marker + // traits, and hence never actually require an actual + // change to the vtable. + old_info.expect("unsized_info: missing old info for trait upcast") + } + (_, &ty::Dynamic(ref data, ..)) => { + let vtable_ptr = cx.layout_of(cx.tcx().mk_mut_ptr(target)) + .field(cx, FAT_PTR_EXTRA); + cx.const_ptrcast(meth::get_vtable(cx, source, data.principal()), + cx.backend_type(vtable_ptr)) + } + _ => bug!("unsized_info: invalid unsizing {:?} -> {:?}", + source, + target), + } +} + +/// Coerce `src` to `dst_ty`. `src_ty` must be a thin pointer. 
+pub fn unsize_thin_ptr<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( + bx: &mut Bx, + src: Bx::Value, + src_ty: Ty<'tcx>, + dst_ty: Ty<'tcx> +) -> (Bx::Value, Bx::Value) { + debug!("unsize_thin_ptr: {:?} => {:?}", src_ty, dst_ty); + match (&src_ty.sty, &dst_ty.sty) { + (&ty::Ref(_, a, _), + &ty::Ref(_, b, _)) | + (&ty::Ref(_, a, _), + &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) | + (&ty::RawPtr(ty::TypeAndMut { ty: a, .. }), + &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) => { + assert!(bx.cx().type_is_sized(a)); + let ptr_ty = bx.cx().type_ptr_to(bx.cx().backend_type(bx.cx().layout_of(b))); + (bx.pointercast(src, ptr_ty), unsized_info(bx.cx(), a, b, None)) + } + (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) if def_a.is_box() && def_b.is_box() => { + let (a, b) = (src_ty.boxed_ty(), dst_ty.boxed_ty()); + assert!(bx.cx().type_is_sized(a)); + let ptr_ty = bx.cx().type_ptr_to(bx.cx().backend_type(bx.cx().layout_of(b))); + (bx.pointercast(src, ptr_ty), unsized_info(bx.cx(), a, b, None)) + } + (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => { + assert_eq!(def_a, def_b); + + let src_layout = bx.cx().layout_of(src_ty); + let dst_layout = bx.cx().layout_of(dst_ty); + let mut result = None; + for i in 0..src_layout.fields.count() { + let src_f = src_layout.field(bx.cx(), i); + assert_eq!(src_layout.fields.offset(i).bytes(), 0); + assert_eq!(dst_layout.fields.offset(i).bytes(), 0); + if src_f.is_zst() { + continue; + } + assert_eq!(src_layout.size, src_f.size); + + let dst_f = dst_layout.field(bx.cx(), i); + assert_ne!(src_f.ty, dst_f.ty); + assert_eq!(result, None); + result = Some(unsize_thin_ptr(bx, src, src_f.ty, dst_f.ty)); + } + let (lldata, llextra) = result.unwrap(); + // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types. + (bx.bitcast(lldata, bx.cx().scalar_pair_element_backend_type(dst_layout, 0, true)), + bx.bitcast(llextra, bx.cx().scalar_pair_element_backend_type(dst_layout, 1, true))) + } + _ => bug!("unsize_thin_ptr: called on bad types"), + } +} + +/// Coerce `src`, which is a reference to a value of type `src_ty`, +/// to a value of type `dst_ty` and store the result in `dst` +pub fn coerce_unsized_into<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( + bx: &mut Bx, + src: PlaceRef<'tcx, Bx::Value>, + dst: PlaceRef<'tcx, Bx::Value> +) { + let src_ty = src.layout.ty; + let dst_ty = dst.layout.ty; + let mut coerce_ptr = || { + let (base, info) = match bx.load_operand(src).val { + OperandValue::Pair(base, info) => { + // fat-ptr to fat-ptr unsize preserves the vtable + // i.e. &'a fmt::Debug+Send => &'a fmt::Debug + // So we need to pointercast the base to ensure + // the types match up. + let thin_ptr = dst.layout.field(bx.cx(), FAT_PTR_ADDR); + (bx.pointercast(base, bx.cx().backend_type(thin_ptr)), info) + } + OperandValue::Immediate(base) => { + unsize_thin_ptr(bx, base, src_ty, dst_ty) + } + OperandValue::Ref(..) 
=> bug!() + }; + OperandValue::Pair(base, info).store(bx, dst); + }; + match (&src_ty.sty, &dst_ty.sty) { + (&ty::Ref(..), &ty::Ref(..)) | + (&ty::Ref(..), &ty::RawPtr(..)) | + (&ty::RawPtr(..), &ty::RawPtr(..)) => { + coerce_ptr() + } + (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) if def_a.is_box() && def_b.is_box() => { + coerce_ptr() + } + + (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => { + assert_eq!(def_a, def_b); + + for i in 0..def_a.variants[VariantIdx::new(0)].fields.len() { + let src_f = src.project_field(bx, i); + let dst_f = dst.project_field(bx, i); + + if dst_f.layout.is_zst() { + continue; + } + + if src_f.layout.ty == dst_f.layout.ty { + memcpy_ty(bx, dst_f.llval, dst_f.align, src_f.llval, src_f.align, + src_f.layout, MemFlags::empty()); + } else { + coerce_unsized_into(bx, src_f, dst_f); + } + } + } + _ => bug!("coerce_unsized_into: invalid coercion {:?} -> {:?}", + src_ty, + dst_ty), + } +} + +pub fn cast_shift_expr_rhs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( + bx: &mut Bx, + op: hir::BinOpKind, + lhs: Bx::Value, + rhs: Bx::Value +) -> Bx::Value { + cast_shift_rhs(bx, op, lhs, rhs) +} + +fn cast_shift_rhs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( + bx: &mut Bx, + op: hir::BinOpKind, + lhs: Bx::Value, + rhs: Bx::Value, +) -> Bx::Value { + // Shifts may have any size int on the rhs + if op.is_shift() { + let mut rhs_llty = bx.cx().val_ty(rhs); + let mut lhs_llty = bx.cx().val_ty(lhs); + if bx.cx().type_kind(rhs_llty) == TypeKind::Vector { + rhs_llty = bx.cx().element_type(rhs_llty) + } + if bx.cx().type_kind(lhs_llty) == TypeKind::Vector { + lhs_llty = bx.cx().element_type(lhs_llty) + } + let rhs_sz = bx.cx().int_width(rhs_llty); + let lhs_sz = bx.cx().int_width(lhs_llty); + if lhs_sz < rhs_sz { + bx.trunc(rhs, lhs_llty) + } else if lhs_sz > rhs_sz { + // FIXME (#1877: If in the future shifting by negative + // values is no longer undefined then this is wrong. + bx.zext(rhs, lhs_llty) + } else { + rhs + } + } else { + rhs + } +} + +/// Returns whether this session's target will use SEH-based unwinding. +/// +/// This is only true for MSVC targets, and even then the 64-bit MSVC target +/// currently uses SEH-ish unwinding with DWARF info tables to the side (same as +/// 64-bit MinGW) instead of "full SEH". 
+pub fn wants_msvc_seh(sess: &Session) -> bool { + sess.target.target.options.is_like_msvc +} + +pub fn from_immediate<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( + bx: &mut Bx, + val: Bx::Value +) -> Bx::Value { + if bx.cx().val_ty(val) == bx.cx().type_i1() { + bx.zext(val, bx.cx().type_i8()) + } else { + val + } +} + +pub fn to_immediate<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( + bx: &mut Bx, + val: Bx::Value, + layout: layout::TyLayout, +) -> Bx::Value { + if let layout::Abi::Scalar(ref scalar) = layout.abi { + return to_immediate_scalar(bx, val, scalar); + } + val +} + +pub fn to_immediate_scalar<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( + bx: &mut Bx, + val: Bx::Value, + scalar: &layout::Scalar, +) -> Bx::Value { + if scalar.is_bool() { + return bx.trunc(val, bx.cx().type_i1()); + } + val +} + +pub fn memcpy_ty<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( + bx: &mut Bx, + dst: Bx::Value, + dst_align: Align, + src: Bx::Value, + src_align: Align, + layout: TyLayout<'tcx>, + flags: MemFlags, +) { + let size = layout.size.bytes(); + if size == 0 { + return; + } + + bx.memcpy(dst, dst_align, src, src_align, bx.cx().const_usize(size), flags); +} + +pub fn codegen_instance<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( + cx: &'a Bx::CodegenCx, + instance: Instance<'tcx>, +) { + let _s = if cx.sess().codegen_stats() { + let mut instance_name = String::new(); + DefPathBasedNames::new(cx.tcx(), true, true) + .push_def_path(instance.def_id(), &mut instance_name); + Some(StatRecorder::new(cx, instance_name)) + } else { + None + }; + + // this is an info! to allow collecting monomorphization statistics + // and to allow finding the last function before LLVM aborts from + // release builds. + info!("codegen_instance({})", instance); + + let sig = instance.fn_sig(cx.tcx()); + let sig = cx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig); + + let lldecl = cx.instances().borrow().get(&instance).cloned().unwrap_or_else(|| + bug!("Instance `{:?}` not already declared", instance)); + + cx.stats().borrow_mut().n_closures += 1; + + let mir = cx.tcx().instance_mir(instance.def); + mir::codegen_mir::(cx, lldecl, &mir, instance, sig); +} + +/// Create the `main` function which will initialize the rust runtime and call +/// users main function. +pub fn maybe_create_entry_wrapper<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( + cx: &'a Bx::CodegenCx +) { + let (main_def_id, span) = match *cx.sess().entry_fn.borrow() { + Some((id, span, _)) => { + (cx.tcx().hir.local_def_id(id), span) + } + None => return, + }; + + let instance = Instance::mono(cx.tcx(), main_def_id); + + if !cx.codegen_unit().contains_item(&MonoItem::Fn(instance)) { + // We want to create the wrapper in the same codegen unit as Rust's main + // function. + return; + } + + let main_llfn = cx.get_fn(instance); + + let et = cx.sess().entry_fn.get().map(|e| e.2); + match et { + Some(EntryFnType::Main) => create_entry_fn::(cx, span, main_llfn, main_def_id, true), + Some(EntryFnType::Start) => create_entry_fn::(cx, span, main_llfn, main_def_id, false), + None => {} // Do nothing. 
+ } + + fn create_entry_fn<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( + cx: &'a Bx::CodegenCx, + sp: Span, + rust_main: Bx::Value, + rust_main_def_id: DefId, + use_start_lang_item: bool, + ) { + let llfty = + cx.type_func(&[cx.type_int(), cx.type_ptr_to(cx.type_i8p())], cx.type_int()); + + let main_ret_ty = cx.tcx().fn_sig(rust_main_def_id).output(); + // Given that `main()` has no arguments, + // then its return type cannot have + // late-bound regions, since late-bound + // regions must appear in the argument + // listing. + let main_ret_ty = cx.tcx().erase_regions( + &main_ret_ty.no_bound_vars().unwrap(), + ); + + if cx.get_defined_value("main").is_some() { + // FIXME: We should be smart and show a better diagnostic here. + cx.sess().struct_span_err(sp, "entry symbol `main` defined multiple times") + .help("did you use #[no_mangle] on `fn main`? Use #[start] instead") + .emit(); + cx.sess().abort_if_errors(); + bug!(); + } + let llfn = cx.declare_cfn("main", llfty); + + // `main` should respect same config for frame pointer elimination as rest of code + cx.set_frame_pointer_elimination(llfn); + cx.apply_target_cpu_attr(llfn); + + let mut bx = Bx::new_block(&cx, llfn, "top"); + + bx.insert_reference_to_gdb_debug_scripts_section_global(); + + // Params from native main() used as args for rust start function + let param_argc = cx.get_param(llfn, 0); + let param_argv = cx.get_param(llfn, 1); + let arg_argc = bx.intcast(param_argc, cx.type_isize(), true); + let arg_argv = param_argv; + + let (start_fn, args) = if use_start_lang_item { + let start_def_id = cx.tcx().require_lang_item(StartFnLangItem); + let start_fn = callee::resolve_and_get_fn( + cx, + start_def_id, + cx.tcx().intern_substs(&[main_ret_ty.into()]), + ); + (start_fn, vec![bx.pointercast(rust_main, cx.type_ptr_to(cx.type_i8p())), + arg_argc, arg_argv]) + } else { + debug!("using user-defined start fn"); + (rust_main, vec![arg_argc, arg_argv]) + }; + + let result = bx.call(start_fn, &args, None); + let cast = bx.intcast(result, cx.type_int(), true); + bx.ret(cast); + } +} + +pub const CODEGEN_WORKER_ID: usize = ::std::usize::MAX; +pub const CODEGEN_WORKER_TIMELINE: time_graph::TimelineId = + time_graph::TimelineId(CODEGEN_WORKER_ID); +pub const CODEGEN_WORK_PACKAGE_KIND: time_graph::WorkPackageKind = + time_graph::WorkPackageKind(&["#DE9597", "#FED1D3", "#FDC5C7", "#B46668", "#88494B"]); + + +pub fn codegen_crate( + backend: B, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + rx: mpsc::Receiver> +) -> OngoingCodegen { + + check_for_rustc_errors_attr(tcx); + + let cgu_name_builder = &mut CodegenUnitNameBuilder::new(tcx); + + // Codegen the metadata. + tcx.sess.profiler(|p| p.start_activity(ProfileCategory::Codegen)); + + let metadata_cgu_name = cgu_name_builder.build_cgu_name(LOCAL_CRATE, + &["crate"], + Some("metadata")).as_str() + .to_string(); + let metadata_llvm_module = backend.new_metadata(tcx.sess, &metadata_cgu_name); + let metadata = time(tcx.sess, "write metadata", || { + backend.write_metadata(tcx, &metadata_llvm_module) + }); + tcx.sess.profiler(|p| p.end_activity(ProfileCategory::Codegen)); + + let metadata_module = ModuleCodegen { + name: metadata_cgu_name, + module_llvm: metadata_llvm_module, + kind: ModuleKind::Metadata, + }; + + let time_graph = if tcx.sess.opts.debugging_opts.codegen_time_graph { + Some(time_graph::TimeGraph::new()) + } else { + None + }; + + // Skip crate items and just output metadata in -Z no-codegen mode. 
+ if tcx.sess.opts.debugging_opts.no_codegen || + !tcx.sess.opts.output_types.should_codegen() { + let ongoing_codegen = start_async_codegen( + backend, + tcx, + time_graph, + metadata, + rx, + 1); + + ongoing_codegen.submit_pre_codegened_module_to_llvm(tcx, metadata_module); + ongoing_codegen.codegen_finished(tcx); + + assert_and_save_dep_graph(tcx); + + ongoing_codegen.check_for_errors(tcx.sess); + + return ongoing_codegen; + } + + // Run the monomorphization collector and partition the collected items into + // codegen units. + let codegen_units = tcx.collect_and_partition_mono_items(LOCAL_CRATE).1; + let codegen_units = (*codegen_units).clone(); + + // Force all codegen_unit queries so they are already either red or green + // when compile_codegen_unit accesses them. We are not able to re-execute + // the codegen_unit query from just the DepNode, so an unknown color would + // lead to having to re-execute compile_codegen_unit, possibly + // unnecessarily. + if tcx.dep_graph.is_fully_enabled() { + for cgu in &codegen_units { + tcx.codegen_unit(cgu.name().clone()); + } + } + + let ongoing_codegen = start_async_codegen( + backend.clone(), + tcx, + time_graph.clone(), + metadata, + rx, + codegen_units.len()); + let ongoing_codegen = AbortCodegenOnDrop::(Some(ongoing_codegen)); + + // Codegen an allocator shim, if necessary. + // + // If the crate doesn't have an `allocator_kind` set then there's definitely + // no shim to generate. Otherwise we also check our dependency graph for all + // our output crate types. If anything there looks like its a `Dynamic` + // linkage, then it's already got an allocator shim and we'll be using that + // one instead. If nothing exists then it's our job to generate the + // allocator! + let any_dynamic_crate = tcx.sess.dependency_formats.borrow() + .iter() + .any(|(_, list)| { + use rustc::middle::dependency_format::Linkage; + list.iter().any(|&linkage| linkage == Linkage::Dynamic) + }); + let allocator_module = if any_dynamic_crate { + None + } else if let Some(kind) = *tcx.sess.allocator_kind.get() { + let llmod_id = cgu_name_builder.build_cgu_name(LOCAL_CRATE, + &["crate"], + Some("allocator")).as_str() + .to_string(); + let modules = backend.new_metadata(tcx.sess, &llmod_id); + time(tcx.sess, "write allocator module", || { + backend.codegen_allocator(tcx, &modules, kind) + }); + + Some(ModuleCodegen { + name: llmod_id, + module_llvm: modules, + kind: ModuleKind::Allocator, + }) + } else { + None + }; + + if let Some(allocator_module) = allocator_module { + ongoing_codegen.submit_pre_codegened_module_to_llvm(tcx, allocator_module); + } + + ongoing_codegen.submit_pre_codegened_module_to_llvm(tcx, metadata_module); + + // We sort the codegen units by size. This way we can schedule work for LLVM + // a bit more efficiently. 
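The size-based ordering mentioned in the comment above is done with `sort_by_cached_key` and `cmp::Reverse`, as the code just below shows. A standalone version of the same idea, with invented CGU names and size estimates:

```
use std::cmp;

fn main() {
    // Sorting by the reversed key gives a descending order, so the largest
    // estimated CGU comes first and is handed to LLVM earliest.
    let mut cgu_sizes = vec![("cgu-a", 120_usize), ("cgu-b", 900), ("cgu-c", 10)];
    cgu_sizes.sort_by_cached_key(|&(_, size)| cmp::Reverse(size));
    assert_eq!(cgu_sizes.first(), Some(&("cgu-b", 900)));
    println!("{:?}", cgu_sizes);
}
```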
+ let codegen_units = { + let mut codegen_units = codegen_units; + codegen_units.sort_by_cached_key(|cgu| cmp::Reverse(cgu.size_estimate())); + codegen_units + }; + + let mut total_codegen_time = Duration::new(0, 0); + let mut all_stats = Stats::default(); + + for cgu in codegen_units.into_iter() { + ongoing_codegen.wait_for_signal_to_codegen_item(); + ongoing_codegen.check_for_errors(tcx.sess); + + let cgu_reuse = determine_cgu_reuse(tcx, &cgu); + tcx.sess.cgu_reuse_tracker.set_actual_reuse(&cgu.name().as_str(), cgu_reuse); + + match cgu_reuse { + CguReuse::No => { + let _timing_guard = time_graph.as_ref().map(|time_graph| { + time_graph.start(CODEGEN_WORKER_TIMELINE, + CODEGEN_WORK_PACKAGE_KIND, + &format!("codegen {}", cgu.name())) + }); + let start_time = Instant::now(); + let stats = backend.compile_codegen_unit(tcx, *cgu.name()); + all_stats.extend(stats); + total_codegen_time += start_time.elapsed(); + false + } + CguReuse::PreLto => { + submit_pre_lto_module_to_llvm(&backend, tcx, CachedModuleCodegen { + name: cgu.name().to_string(), + source: cgu.work_product(tcx), + }); + true + } + CguReuse::PostLto => { + submit_post_lto_module_to_llvm(&backend, tcx, CachedModuleCodegen { + name: cgu.name().to_string(), + source: cgu.work_product(tcx), + }); + true + } + }; + } + + ongoing_codegen.codegen_finished(tcx); + + // Since the main thread is sometimes blocked during codegen, we keep track + // -Ztime-passes output manually. + print_time_passes_entry(tcx.sess.time_passes(), + "codegen to LLVM IR", + total_codegen_time); + + ::rustc_incremental::assert_module_sources::assert_module_sources(tcx); + + symbol_names_test::report_symbol_names(tcx); + + if tcx.sess.codegen_stats() { + println!("--- codegen stats ---"); + println!("n_glues_created: {}", all_stats.n_glues_created); + println!("n_null_glues: {}", all_stats.n_null_glues); + println!("n_real_glues: {}", all_stats.n_real_glues); + + println!("n_fns: {}", all_stats.n_fns); + println!("n_inlines: {}", all_stats.n_inlines); + println!("n_closures: {}", all_stats.n_closures); + println!("fn stats:"); + all_stats.fn_stats.sort_by_key(|&(_, insns)| insns); + for &(ref name, insns) in all_stats.fn_stats.iter() { + println!("{} insns, {}", insns, *name); + } + } + + if tcx.sess.count_llvm_insns() { + for (k, v) in all_stats.llvm_insns.iter() { + println!("{:7} {}", *v, *k); + } + } + + ongoing_codegen.check_for_errors(tcx.sess); + + assert_and_save_dep_graph(tcx); + ongoing_codegen.into_inner() +} + +/// A curious wrapper structure whose only purpose is to call `codegen_aborted` +/// when it's dropped abnormally. +/// +/// In the process of working on rust-lang/rust#55238 a mysterious segfault was +/// stumbled upon. The segfault was never reproduced locally, but it was +/// suspected to be related to the fact that codegen worker threads were +/// sticking around by the time the main thread was exiting, causing issues. +/// +/// This structure is an attempt to fix that issue where the `codegen_aborted` +/// message will block until all workers have finished. This should ensure that +/// even if the main codegen thread panics we'll wait for pending work to +/// complete before returning from the main thread, hopefully avoiding +/// segfaults. +/// +/// If you see this comment in the code, then it means that this workaround +/// worked! We may yet one day track down the mysterious cause of that +/// segfault... 
+struct AbortCodegenOnDrop(Option>); + +impl AbortCodegenOnDrop { + fn into_inner(mut self) -> OngoingCodegen { + self.0.take().unwrap() + } +} + +impl Deref for AbortCodegenOnDrop { + type Target = OngoingCodegen; + + fn deref(&self) -> &OngoingCodegen { + self.0.as_ref().unwrap() + } +} + +impl DerefMut for AbortCodegenOnDrop { + fn deref_mut(&mut self) -> &mut OngoingCodegen { + self.0.as_mut().unwrap() + } +} + +impl Drop for AbortCodegenOnDrop { + fn drop(&mut self) { + if let Some(codegen) = self.0.take() { + codegen.codegen_aborted(); + } + } +} + +fn assert_and_save_dep_graph<'ll, 'tcx>(tcx: TyCtxt<'ll, 'tcx, 'tcx>) { + time(tcx.sess, + "assert dep graph", + || ::rustc_incremental::assert_dep_graph(tcx)); + + time(tcx.sess, + "serialize dep graph", + || ::rustc_incremental::save_dep_graph(tcx)); +} + +impl CrateInfo { + pub fn new(tcx: TyCtxt) -> CrateInfo { + let mut info = CrateInfo { + panic_runtime: None, + compiler_builtins: None, + profiler_runtime: None, + sanitizer_runtime: None, + is_no_builtins: Default::default(), + native_libraries: Default::default(), + used_libraries: tcx.native_libraries(LOCAL_CRATE), + link_args: tcx.link_args(LOCAL_CRATE), + crate_name: Default::default(), + used_crates_dynamic: cstore::used_crates(tcx, LinkagePreference::RequireDynamic), + used_crates_static: cstore::used_crates(tcx, LinkagePreference::RequireStatic), + used_crate_source: Default::default(), + wasm_imports: Default::default(), + lang_item_to_crate: Default::default(), + missing_lang_items: Default::default(), + }; + let lang_items = tcx.lang_items(); + + let load_wasm_items = tcx.sess.crate_types.borrow() + .iter() + .any(|c| *c != config::CrateType::Rlib) && + tcx.sess.opts.target_triple.triple() == "wasm32-unknown-unknown"; + + if load_wasm_items { + info.load_wasm_imports(tcx, LOCAL_CRATE); + } + + let crates = tcx.crates(); + + let n_crates = crates.len(); + info.native_libraries.reserve(n_crates); + info.crate_name.reserve(n_crates); + info.used_crate_source.reserve(n_crates); + info.missing_lang_items.reserve(n_crates); + + for &cnum in crates.iter() { + info.native_libraries.insert(cnum, tcx.native_libraries(cnum)); + info.crate_name.insert(cnum, tcx.crate_name(cnum).to_string()); + info.used_crate_source.insert(cnum, tcx.used_crate_source(cnum)); + if tcx.is_panic_runtime(cnum) { + info.panic_runtime = Some(cnum); + } + if tcx.is_compiler_builtins(cnum) { + info.compiler_builtins = Some(cnum); + } + if tcx.is_profiler_runtime(cnum) { + info.profiler_runtime = Some(cnum); + } + if tcx.is_sanitizer_runtime(cnum) { + info.sanitizer_runtime = Some(cnum); + } + if tcx.is_no_builtins(cnum) { + info.is_no_builtins.insert(cnum); + } + if load_wasm_items { + info.load_wasm_imports(tcx, cnum); + } + let missing = tcx.missing_lang_items(cnum); + for &item in missing.iter() { + if let Ok(id) = lang_items.require(item) { + info.lang_item_to_crate.insert(item, id.krate); + } + } + + // No need to look for lang items that are whitelisted and don't + // actually need to exist. 
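Both the `Bomb` in `spawn_work` and `AbortCodegenOnDrop` above lean on the same drop-guard idiom: the cleanup action lives in `Drop`, so it still runs when the owning scope unwinds from a panic. A stripped-down sketch of that idiom, with invented names:

```
use std::sync::mpsc::{channel, Sender};
use std::thread;

struct DoneGuard {
    tx: Sender<Result<String, ()>>,
    result: Option<String>,
}

impl Drop for DoneGuard {
    fn drop(&mut self) {
        // `None` means the work never completed, i.e. the worker panicked.
        let _ = self.tx.send(self.result.take().ok_or(()));
    }
}

fn main() {
    let (tx, rx) = channel();
    let worker = thread::spawn(move || {
        let mut guard = DoneGuard { tx, result: None };
        // ... the actual work would happen here ...
        guard.result = Some("module compiled".to_string());
    });
    let _ = worker.join();
    // The coordinator always hears back, success or panic.
    println!("coordinator saw: {:?}", rx.recv().unwrap());
}
```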
+ let missing = missing.iter() + .cloned() + .filter(|&l| !weak_lang_items::whitelisted(tcx, l)) + .collect(); + info.missing_lang_items.insert(cnum, missing); + } + + return info + } + + fn load_wasm_imports(&mut self, tcx: TyCtxt, cnum: CrateNum) { + self.wasm_imports.extend(tcx.wasm_import_module_map(cnum).iter().map(|(&id, module)| { + let instance = Instance::mono(tcx, id); + let import_name = tcx.symbol_name(instance); + + (import_name.to_string(), module.clone()) + })); + } +} + +fn is_codegened_item(tcx: TyCtxt, id: DefId) -> bool { + let (all_mono_items, _) = + tcx.collect_and_partition_mono_items(LOCAL_CRATE); + all_mono_items.contains(&id) +} + +pub fn provide_both(providers: &mut Providers) { + providers.dllimport_foreign_items = |tcx, krate| { + let module_map = tcx.foreign_modules(krate); + let module_map = module_map.iter() + .map(|lib| (lib.def_id, lib)) + .collect::>(); + + let dllimports = tcx.native_libraries(krate) + .iter() + .filter(|lib| { + if lib.kind != cstore::NativeLibraryKind::NativeUnknown { + return false + } + let cfg = match lib.cfg { + Some(ref cfg) => cfg, + None => return true, + }; + attr::cfg_matches(cfg, &tcx.sess.parse_sess, None) + }) + .filter_map(|lib| lib.foreign_module) + .map(|id| &module_map[&id]) + .flat_map(|module| module.foreign_items.iter().cloned()) + .collect(); + Lrc::new(dllimports) + }; + + providers.is_dllimport_foreign_item = |tcx, def_id| { + tcx.dllimport_foreign_items(def_id.krate).contains(&def_id) + }; +} + +fn determine_cgu_reuse<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + cgu: &CodegenUnit<'tcx>) + -> CguReuse { + if !tcx.dep_graph.is_fully_enabled() { + return CguReuse::No + } + + let work_product_id = &cgu.work_product_id(); + if tcx.dep_graph.previous_work_product(work_product_id).is_none() { + // We don't have anything cached for this CGU. This can happen + // if the CGU did not exist in the previous session. + return CguReuse::No + } + + // Try to mark the CGU as green. If it we can do so, it means that nothing + // affecting the LLVM module has changed and we can re-use a cached version. + // If we compile with any kind of LTO, this means we can re-use the bitcode + // of the Pre-LTO stage (possibly also the Post-LTO version but we'll only + // know that later). If we are not doing LTO, there is only one optimized + // version of each module, so we re-use that. + let dep_node = cgu.codegen_dep_node(tcx); + assert!(!tcx.dep_graph.dep_node_exists(&dep_node), + "CompileCodegenUnit dep-node for CGU `{}` already exists before marking.", + cgu.name()); + + if tcx.dep_graph.try_mark_green(tcx, &dep_node).is_some() { + // We can re-use either the pre- or the post-thinlto state + if tcx.sess.lto() != Lto::No { + CguReuse::PreLto + } else { + CguReuse::PostLto + } + } else { + CguReuse::No + } +} diff --git a/src/librustc_codegen_ssa/callee.rs b/src/librustc_codegen_ssa/callee.rs new file mode 100644 index 0000000000..5ff1d9b599 --- /dev/null +++ b/src/librustc_codegen_ssa/callee.rs @@ -0,0 +1,46 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
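Restating the decision logic of `determine_cgu_reuse` above as a tiny sketch; the boolean parameters are a simplification, where `node_is_green` stands in for `dep_graph.try_mark_green` succeeding and `have_cached_work_product` folds in the fully-enabled dep-graph check:

```
#[derive(Debug, PartialEq)]
enum CguReuse { No, PreLto, PostLto }

fn determine_reuse(have_cached_work_product: bool, node_is_green: bool, lto_enabled: bool) -> CguReuse {
    if !have_cached_work_product || !node_is_green {
        CguReuse::No
    } else if lto_enabled {
        // With LTO we can only re-use the pre-LTO bitcode.
        CguReuse::PreLto
    } else {
        // Without LTO there is a single optimized version per module.
        CguReuse::PostLto
    }
}

fn main() {
    assert_eq!(determine_reuse(true, true, true), CguReuse::PreLto);
    assert_eq!(determine_reuse(true, true, false), CguReuse::PostLto);
    assert_eq!(determine_reuse(false, true, false), CguReuse::No);
    println!("ok");
}
```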
+ +use traits::*; +use rustc::ty; +use rustc::ty::subst::Substs; +use rustc::hir::def_id::DefId; + +pub fn resolve_and_get_fn<'tcx, Cx: CodegenMethods<'tcx>>( + cx: &Cx, + def_id: DefId, + substs: &'tcx Substs<'tcx>, +) -> Cx::Value { + cx.get_fn( + ty::Instance::resolve( + cx.tcx(), + ty::ParamEnv::reveal_all(), + def_id, + substs + ).unwrap() + ) +} + +pub fn resolve_and_get_fn_for_vtable<'tcx, + Cx: Backend<'tcx> + MiscMethods<'tcx> + TypeMethods<'tcx> +>( + cx: &Cx, + def_id: DefId, + substs: &'tcx Substs<'tcx>, +) -> Cx::Value { + cx.get_fn( + ty::Instance::resolve_for_vtable( + cx.tcx(), + ty::ParamEnv::reveal_all(), + def_id, + substs + ).unwrap() + ) +} diff --git a/src/librustc_codegen_ssa/common.rs b/src/librustc_codegen_ssa/common.rs new file mode 100644 index 0000000000..8c53129abc --- /dev/null +++ b/src/librustc_codegen_ssa/common.rs @@ -0,0 +1,230 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. +#![allow(non_camel_case_types, non_snake_case)] + +use rustc::ty::{self, Ty, TyCtxt}; +use syntax_pos::{DUMMY_SP, Span}; + +use rustc::hir::def_id::DefId; +use rustc::middle::lang_items::LangItem; +use base; +use traits::*; + +use rustc::hir; +use traits::BuilderMethods; + +pub fn type_needs_drop<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool { + ty.needs_drop(tcx, ty::ParamEnv::reveal_all()) +} + +pub fn type_is_sized<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool { + ty.is_sized(tcx.at(DUMMY_SP), ty::ParamEnv::reveal_all()) +} + +pub fn type_is_freeze<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool { + ty.is_freeze(tcx, ty::ParamEnv::reveal_all(), DUMMY_SP) +} + +pub enum IntPredicate { + IntEQ, + IntNE, + IntUGT, + IntUGE, + IntULT, + IntULE, + IntSGT, + IntSGE, + IntSLT, + IntSLE +} + + +#[allow(dead_code)] +pub enum RealPredicate { + RealPredicateFalse, + RealOEQ, + RealOGT, + RealOGE, + RealOLT, + RealOLE, + RealONE, + RealORD, + RealUNO, + RealUEQ, + RealUGT, + RealUGE, + RealULT, + RealULE, + RealUNE, + RealPredicateTrue +} + +pub enum AtomicRmwBinOp { + AtomicXchg, + AtomicAdd, + AtomicSub, + AtomicAnd, + AtomicNand, + AtomicOr, + AtomicXor, + AtomicMax, + AtomicMin, + AtomicUMax, + AtomicUMin +} + +pub enum AtomicOrdering { + #[allow(dead_code)] + NotAtomic, + Unordered, + Monotonic, + // Consume, // Not specified yet. + Acquire, + Release, + AcquireRelease, + SequentiallyConsistent, +} + +pub enum SynchronizationScope { + // FIXME: figure out if this variant is needed at all. + #[allow(dead_code)] + Other, + SingleThread, + CrossThread, +} + +#[derive(Copy, Clone, PartialEq, Debug)] +pub enum TypeKind { + Void, + Half, + Float, + Double, + X86_FP80, + FP128, + PPC_FP128, + Label, + Integer, + Function, + Struct, + Array, + Pointer, + Vector, + Metadata, + X86_MMX, + Token, +} + +// FIXME(mw): Anything that is produced via DepGraph::with_task() must implement +// the HashStable trait. Normally DepGraph::with_task() calls are +// hidden behind queries, but CGU creation is a special case in two +// ways: (1) it's not a query and (2) CGU are output nodes, so their +// Fingerprints are not actually needed. 
It remains to be clarified +// how exactly this case will be handled in the red/green system but +// for now we content ourselves with providing a no-op HashStable +// implementation for CGUs. +mod temp_stable_hash_impls { + use rustc_data_structures::stable_hasher::{StableHasherResult, StableHasher, + HashStable}; + use ModuleCodegen; + + impl HashStable for ModuleCodegen { + fn hash_stable(&self, + _: &mut HCX, + _: &mut StableHasher) { + // do nothing + } + } +} + +pub fn langcall(tcx: TyCtxt, + span: Option, + msg: &str, + li: LangItem) + -> DefId { + tcx.lang_items().require(li).unwrap_or_else(|s| { + let msg = format!("{} {}", msg, s); + match span { + Some(span) => tcx.sess.span_fatal(span, &msg[..]), + None => tcx.sess.fatal(&msg[..]), + } + }) +} + +// To avoid UB from LLVM, these two functions mask RHS with an +// appropriate mask unconditionally (i.e. the fallback behavior for +// all shifts). For 32- and 64-bit types, this matches the semantics +// of Java. (See related discussion on #1877 and #10183.) + +pub fn build_unchecked_lshift<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( + bx: &mut Bx, + lhs: Bx::Value, + rhs: Bx::Value +) -> Bx::Value { + let rhs = base::cast_shift_expr_rhs(bx, hir::BinOpKind::Shl, lhs, rhs); + // #1877, #10183: Ensure that input is always valid + let rhs = shift_mask_rhs(bx, rhs); + bx.shl(lhs, rhs) +} + +pub fn build_unchecked_rshift<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( + bx: &mut Bx, + lhs_t: Ty<'tcx>, + lhs: Bx::Value, + rhs: Bx::Value +) -> Bx::Value { + let rhs = base::cast_shift_expr_rhs(bx, hir::BinOpKind::Shr, lhs, rhs); + // #1877, #10183: Ensure that input is always valid + let rhs = shift_mask_rhs(bx, rhs); + let is_signed = lhs_t.is_signed(); + if is_signed { + bx.ashr(lhs, rhs) + } else { + bx.lshr(lhs, rhs) + } +} + +fn shift_mask_rhs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( + bx: &mut Bx, + rhs: Bx::Value +) -> Bx::Value { + let rhs_llty = bx.val_ty(rhs); + let shift_val = shift_mask_val(bx, rhs_llty, rhs_llty, false); + bx.and(rhs, shift_val) +} + +pub fn shift_mask_val<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( + bx: &mut Bx, + llty: Bx::Type, + mask_llty: Bx::Type, + invert: bool +) -> Bx::Value { + let kind = bx.type_kind(llty); + match kind { + TypeKind::Integer => { + // i8/u8 can shift by at most 7, i16/u16 by at most 15, etc. + let val = bx.int_width(llty) - 1; + if invert { + bx.const_int(mask_llty, !val as i64) + } else { + bx.const_uint(mask_llty, val) + } + }, + TypeKind::Vector => { + let mask = shift_mask_val( + bx, + bx.element_type(llty), + bx.element_type(mask_llty), + invert + ); + bx.vector_splat(bx.vector_length(mask_llty), mask) + }, + _ => bug!("shift_mask_val: expected Integer or Vector, found {:?}", kind), + } +} diff --git a/src/librustc_codegen_ssa/debuginfo.rs b/src/librustc_codegen_ssa/debuginfo.rs new file mode 100644 index 0000000000..bcf6d7b6bf --- /dev/null +++ b/src/librustc_codegen_ssa/debuginfo.rs @@ -0,0 +1,91 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
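The shift helpers at the end of the `common.rs` hunk above sidestep LLVM's undefined behaviour for oversized shift amounts by unconditionally masking the right-hand side to `bit_width - 1`. A small sketch of that masking in plain Rust, outside any codegen backend (`masked_shl` and `masked_ashr` are names invented for the example):

```rust
/// Left shift with the same masking the codegen helpers apply:
/// the shift amount is reduced modulo the bit width (`rhs & 31` for u32).
fn masked_shl(lhs: u32, rhs: u32) -> u32 {
    let mask = u32::BITS - 1; // 31
    lhs << (rhs & mask)
}

/// Arithmetic (sign-preserving) right shift with the same masking,
/// matching the signed case handled by `build_unchecked_rshift`.
fn masked_ashr(lhs: i32, rhs: u32) -> i32 {
    let mask = u32::BITS - 1;
    lhs >> (rhs & mask)
}

fn main() {
    // A shift amount of 35 is masked down to 35 & 31 == 3.
    assert_eq!(masked_shl(1, 35), 8);
    // The sign bit is preserved by the arithmetic right shift.
    assert_eq!(masked_ashr(-16, 34), -4);
}
```

This is the same wrap-around behaviour that the standard library's `wrapping_shl`/`wrapping_shr` expose on the integer types.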
+ +use syntax_pos::{BytePos, Span}; +use rustc::hir::def_id::CrateNum; +use std::cell::Cell; + +pub enum FunctionDebugContext { + RegularContext(FunctionDebugContextData), + DebugInfoDisabled, + FunctionWithoutDebugInfo, +} + +impl FunctionDebugContext { + pub fn get_ref<'a>(&'a self, span: Span) -> &'a FunctionDebugContextData { + match *self { + FunctionDebugContext::RegularContext(ref data) => data, + FunctionDebugContext::DebugInfoDisabled => { + span_bug!( + span, + "debuginfo: Error trying to access FunctionDebugContext \ + although debug info is disabled!", + ); + } + FunctionDebugContext::FunctionWithoutDebugInfo => { + span_bug!( + span, + "debuginfo: Error trying to access FunctionDebugContext \ + for function that should be ignored by debug info!", + ); + } + } + } +} + +/// Enables emitting source locations for the given functions. +/// +/// Since we don't want source locations to be emitted for the function prelude, +/// they are disabled when beginning to codegen a new function. This functions +/// switches source location emitting on and must therefore be called before the +/// first real statement/expression of the function is codegened. +pub fn start_emitting_source_locations(dbg_context: &FunctionDebugContext) { + match *dbg_context { + FunctionDebugContext::RegularContext(ref data) => { + data.source_locations_enabled.set(true) + }, + _ => { /* safe to ignore */ } + } +} + +pub struct FunctionDebugContextData { + pub fn_metadata: D, + pub source_locations_enabled: Cell, + pub defining_crate: CrateNum, +} + +pub enum VariableAccess<'a, V> { + // The llptr given is an alloca containing the variable's value + DirectVariable { alloca: V }, + // The llptr given is an alloca containing the start of some pointer chain + // leading to the variable's content. + IndirectVariable { alloca: V, address_operations: &'a [i64] } +} + +pub enum VariableKind { + ArgumentVariable(usize /*index*/), + LocalVariable, +} + + +#[derive(Clone, Copy, Debug)] +pub struct MirDebugScope { + pub scope_metadata: Option, + // Start and end offsets of the file to which this DIScope belongs. + // These are used to quickly determine whether some span refers to the same file. + pub file_start_pos: BytePos, + pub file_end_pos: BytePos, +} + +impl MirDebugScope { + pub fn is_valid(&self) -> bool { + !self.scope_metadata.is_none() + } +} diff --git a/src/librustc_codegen_ssa/diagnostics.rs b/src/librustc_codegen_ssa/diagnostics.rs new file mode 100644 index 0000000000..abe1906888 --- /dev/null +++ b/src/librustc_codegen_ssa/diagnostics.rs @@ -0,0 +1,48 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![allow(non_snake_case)] + +register_long_diagnostics! { + +E0668: r##" +Malformed inline assembly rejected by LLVM. + +LLVM checks the validity of the constraints and the assembly string passed to +it. This error implies that LLVM seems something wrong with the inline +assembly call. 
+ +In particular, it can happen if you forgot the closing bracket of a register +constraint (see issue #51430): +```ignore (error-emitted-at-codegen-which-cannot-be-handled-by-compile_fail) +#![feature(asm)] + +fn main() { + let rax: u64; + unsafe { + asm!("" :"={rax"(rax)); + println!("Accumulator is: {}", rax); + } +} +``` +"##, + +E0669: r##" +Cannot convert inline assembly operand to a single LLVM value. + +This error usually happens when trying to pass in a value to an input inline +assembly operand that is actually a pair of values. In particular, this can +happen when trying to pass in a slice, for instance a `&str`. In Rust, these +values are represented internally as a pair of values, the pointer and its +length. When passed as an input operand, this pair of values can not be +coerced into a register and thus we must fail with an error. +"##, + +} diff --git a/src/librustc_codegen_llvm/glue.rs b/src/librustc_codegen_ssa/glue.rs similarity index 67% rename from src/librustc_codegen_llvm/glue.rs rename to src/librustc_codegen_ssa/glue.rs index 842bdf3cb4..b3257dbc36 100644 --- a/src/librustc_codegen_llvm/glue.rs +++ b/src/librustc_codegen_ssa/glue.rs @@ -14,24 +14,22 @@ use std; -use builder::Builder; -use common::*; -use llvm; +use common::IntPredicate; use meth; -use rustc::ty::layout::LayoutOf; use rustc::ty::{self, Ty}; -use value::Value; +use traits::*; -pub fn size_and_align_of_dst(bx: &Builder<'_, 'll, 'tcx>, t: Ty<'tcx>, info: Option<&'ll Value>) - -> (&'ll Value, &'ll Value) { - debug!("calculate size of DST: {}; with lost info: {:?}", - t, info); - if bx.cx.type_is_sized(t) { - let (size, align) = bx.cx.size_and_align_of(t); - debug!("size_and_align_of_dst t={} info={:?} size: {:?} align: {:?}", - t, info, size, align); - let size = C_usize(bx.cx, size.bytes()); - let align = C_usize(bx.cx, align.abi()); +pub fn size_and_align_of_dst<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( + bx: &mut Bx, + t: Ty<'tcx>, + info: Option +) -> (Bx::Value, Bx::Value) { + let layout = bx.layout_of(t); + debug!("size_and_align_of_dst(ty={}, info={:?}): layout: {:?}", + t, info, layout); + if !layout.is_unsized() { + let size = bx.const_usize(layout.size.bytes()); + let align = bx.const_usize(layout.align.abi.bytes()); return (size, align); } match t.sty { @@ -41,33 +39,30 @@ pub fn size_and_align_of_dst(bx: &Builder<'_, 'll, 'tcx>, t: Ty<'tcx>, info: Opt (meth::SIZE.get_usize(bx, vtable), meth::ALIGN.get_usize(bx, vtable)) } ty::Slice(_) | ty::Str => { - let unit = t.sequence_element_type(bx.tcx()); + let unit = layout.field(bx, 0); // The info in this case is the length of the str, so the size is that // times the unit size. - let (size, align) = bx.cx.size_and_align_of(unit); - (bx.mul(info.unwrap(), C_usize(bx.cx, size.bytes())), - C_usize(bx.cx, align.abi())) + (bx.mul(info.unwrap(), bx.const_usize(unit.size.bytes())), + bx.const_usize(unit.align.abi.bytes())) } _ => { - let cx = bx.cx; // First get the size of all statically known fields. // Don't use size_of because it also rounds up to alignment, which we // want to avoid, as the unsized field's alignment could be smaller. 
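A side note on the arithmetic in `size_and_align_of_dst`: the `ty::Slice`/`ty::Str` arm computes the dynamic size as length times element size, with the element's own alignment, and the struct arm below rounds the final size up with the `(size + (align - 1)) & -align` trick. Both computations in ordinary Rust, with made-up helper names (`slice_size_and_align`, `align_up`):

```rust
use std::mem;

/// Size and alignment of a slice `[T]` with `len` elements, mirroring the
/// `ty::Slice` / `ty::Str` arm: the dynamic size is `len` times the element
/// size, and the alignment is just the element alignment.
fn slice_size_and_align<T>(len: usize) -> (usize, usize) {
    (len * mem::size_of::<T>(), mem::align_of::<T>())
}

/// The `(size + (align - 1)) & -align` rounding used a little further down
/// to pad the total size of a DST-tailed struct; `align` must be a power of two.
fn align_up(size: usize, align: usize) -> usize {
    debug_assert!(align.is_power_of_two());
    (size + align - 1) & align.wrapping_neg()
}

fn main() {
    assert_eq!(slice_size_and_align::<u16>(7), (14, 2));
    assert_eq!(align_up(14, 8), 16);
    assert_eq!(align_up(16, 8), 16);
}
```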
assert!(!t.is_simd()); - let layout = cx.layout_of(t); debug!("DST {} layout: {:?}", t, layout); let i = layout.fields.count() - 1; let sized_size = layout.fields.offset(i).bytes(); - let sized_align = layout.align.abi(); + let sized_align = layout.align.abi.bytes(); debug!("DST {} statically sized prefix size: {} align: {}", t, sized_size, sized_align); - let sized_size = C_usize(cx, sized_size); - let sized_align = C_usize(cx, sized_align); + let sized_size = bx.const_usize(sized_size); + let sized_align = bx.const_usize(sized_align); // Recurse to get the size of the dynamically sized field (must be // the last field). - let field_ty = layout.field(cx, i).ty; + let field_ty = layout.field(bx, i).ty; let (unsized_size, mut unsized_align) = size_and_align_of_dst(bx, field_ty, info); // FIXME (#26403, #27023): We should be adding padding @@ -89,16 +84,17 @@ pub fn size_and_align_of_dst(bx: &Builder<'_, 'll, 'tcx>, t: Ty<'tcx>, info: Opt // Choose max of two known alignments (combined value must // be aligned according to more restrictive of the two). - let align = match (const_to_opt_u128(sized_align, false), - const_to_opt_u128(unsized_align, false)) { + let align = match (bx.const_to_opt_u128(sized_align, false), + bx.const_to_opt_u128(unsized_align, false)) { (Some(sized_align), Some(unsized_align)) => { // If both alignments are constant, (the sized_align should always be), then // pick the correct alignment statically. - C_usize(cx, std::cmp::max(sized_align, unsized_align) as u64) + bx.const_usize(std::cmp::max(sized_align, unsized_align) as u64) + } + _ => { + let cmp = bx.icmp(IntPredicate::IntUGT, sized_align, unsized_align); + bx.select(cmp, sized_align, unsized_align) } - _ => bx.select(bx.icmp(llvm::IntUGT, sized_align, unsized_align), - sized_align, - unsized_align) }; // Issue #27023: must add any necessary padding to `size` @@ -111,9 +107,11 @@ pub fn size_and_align_of_dst(bx: &Builder<'_, 'll, 'tcx>, t: Ty<'tcx>, info: Opt // emulated via the semi-standard fast bit trick: // // `(size + (align-1)) & -align` - - let addend = bx.sub(align, C_usize(bx.cx, 1)); - let size = bx.and(bx.add(size, addend), bx.neg(align)); + let one = bx.const_usize(1); + let addend = bx.sub(align, one); + let add = bx.add(size, addend); + let neg = bx.neg(align); + let size = bx.and(add, neg); (size, align) } diff --git a/src/librustc_codegen_ssa/lib.rs b/src/librustc_codegen_ssa/lib.rs new file mode 100644 index 0000000000..24ede4db6e --- /dev/null +++ b/src/librustc_codegen_ssa/lib.rs @@ -0,0 +1,187 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", + html_favicon_url = "https://doc.rust-lang.org/favicon.ico", + html_root_url = "https://doc.rust-lang.org/nightly/")] + +#![feature(box_patterns)] +#![feature(box_syntax)] +#![feature(custom_attribute)] +#![feature(libc)] +#![feature(rustc_diagnostic_macros)] +#![feature(in_band_lifetimes)] +#![feature(slice_sort_by_cached_key)] +#![feature(nll)] +#![allow(unused_attributes)] +#![allow(dead_code)] +#![feature(quote)] + +//! This crate contains codegen code that is used by all codegen backends (LLVM and others). +//! 
The backend-agnostic functions of this crate use functions defined in various traits that +//! have to be implemented by each backends. + +#[macro_use] extern crate bitflags; +#[macro_use] extern crate log; +extern crate rustc_apfloat; +#[macro_use] extern crate rustc; +extern crate rustc_target; +extern crate rustc_mir; +#[macro_use] extern crate syntax; +extern crate syntax_pos; +extern crate rustc_incremental; +extern crate rustc_codegen_utils; +extern crate rustc_data_structures; +extern crate rustc_allocator; +extern crate rustc_fs_util; +extern crate serialize; +extern crate rustc_errors; +extern crate rustc_demangle; +extern crate cc; +extern crate libc; +extern crate jobserver; +extern crate memmap; +extern crate num_cpus; + +use std::path::PathBuf; +use rustc::dep_graph::WorkProduct; +use rustc::session::config::{OutputFilenames, OutputType}; +use rustc::middle::lang_items::LangItem; +use rustc::hir::def_id::CrateNum; +use rustc_data_structures::fx::{FxHashMap, FxHashSet}; +use rustc_data_structures::sync::Lrc; +use rustc_data_structures::svh::Svh; +use rustc::middle::cstore::{LibSource, CrateSource, NativeLibrary}; +use syntax_pos::symbol::Symbol; + +// NB: This module needs to be declared first so diagnostics are +// registered before they are used. +mod diagnostics; + +pub mod common; +pub mod traits; +pub mod mir; +pub mod debuginfo; +pub mod base; +pub mod callee; +pub mod glue; +pub mod meth; +pub mod mono_item; +pub mod back; + +pub struct ModuleCodegen { + /// The name of the module. When the crate may be saved between + /// compilations, incremental compilation requires that name be + /// unique amongst **all** crates. Therefore, it should contain + /// something unique to this crate (e.g., a module path) as well + /// as the crate name and disambiguator. + /// We currently generate these names via CodegenUnit::build_cgu_name(). + pub name: String, + pub module_llvm: M, + pub kind: ModuleKind, +} + +pub const RLIB_BYTECODE_EXTENSION: &str = "bc.z"; + +impl ModuleCodegen { + pub fn into_compiled_module(self, + emit_obj: bool, + emit_bc: bool, + emit_bc_compressed: bool, + outputs: &OutputFilenames) -> CompiledModule { + let object = if emit_obj { + Some(outputs.temp_path(OutputType::Object, Some(&self.name))) + } else { + None + }; + let bytecode = if emit_bc { + Some(outputs.temp_path(OutputType::Bitcode, Some(&self.name))) + } else { + None + }; + let bytecode_compressed = if emit_bc_compressed { + Some(outputs.temp_path(OutputType::Bitcode, Some(&self.name)) + .with_extension(RLIB_BYTECODE_EXTENSION)) + } else { + None + }; + + CompiledModule { + name: self.name.clone(), + kind: self.kind, + object, + bytecode, + bytecode_compressed, + } + } +} + +#[derive(Debug)] +pub struct CompiledModule { + pub name: String, + pub kind: ModuleKind, + pub object: Option, + pub bytecode: Option, + pub bytecode_compressed: Option, +} + +pub struct CachedModuleCodegen { + pub name: String, + pub source: WorkProduct, +} + +#[derive(Copy, Clone, Debug, PartialEq)] +pub enum ModuleKind { + Regular, + Metadata, + Allocator, +} + +bitflags! 
{ + pub struct MemFlags: u8 { + const VOLATILE = 1 << 0; + const NONTEMPORAL = 1 << 1; + const UNALIGNED = 1 << 2; + } +} + +/// Misc info we load from metadata to persist beyond the tcx +pub struct CrateInfo { + pub panic_runtime: Option, + pub compiler_builtins: Option, + pub profiler_runtime: Option, + pub sanitizer_runtime: Option, + pub is_no_builtins: FxHashSet, + pub native_libraries: FxHashMap>>, + pub crate_name: FxHashMap, + pub used_libraries: Lrc>, + pub link_args: Lrc>, + pub used_crate_source: FxHashMap>, + pub used_crates_static: Vec<(CrateNum, LibSource)>, + pub used_crates_dynamic: Vec<(CrateNum, LibSource)>, + pub wasm_imports: FxHashMap, + pub lang_item_to_crate: FxHashMap, + pub missing_lang_items: FxHashMap>, +} + + +pub struct CodegenResults { + pub crate_name: Symbol, + pub modules: Vec, + pub allocator_module: Option, + pub metadata_module: CompiledModule, + pub crate_hash: Svh, + pub metadata: rustc::middle::cstore::EncodedMetadata, + pub windows_subsystem: Option, + pub linker_info: back::linker::LinkerInfo, + pub crate_info: CrateInfo, +} + +__build_diagnostic_array! { librustc_codegen_ssa, DIAGNOSTICS } diff --git a/src/librustc_codegen_llvm/meth.rs b/src/librustc_codegen_ssa/meth.rs similarity index 56% rename from src/librustc_codegen_llvm/meth.rs rename to src/librustc_codegen_ssa/meth.rs index 29c2e71960..3880935f0f 100644 --- a/src/librustc_codegen_llvm/meth.rs +++ b/src/librustc_codegen_ssa/meth.rs @@ -8,18 +8,13 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use abi::{FnType, FnTypeExt}; +use rustc_target::abi::call::FnType; use callee; -use common::*; -use builder::Builder; -use consts; -use monomorphize; -use type_::Type; -use value::Value; +use rustc_mir::monomorphize; + +use traits::*; use rustc::ty::{self, Ty}; -use rustc::ty::layout::HasDataLayout; -use debuginfo; #[derive(Copy, Clone, Debug)] pub struct VirtualIndex(u64); @@ -28,33 +23,45 @@ pub const DESTRUCTOR: VirtualIndex = VirtualIndex(0); pub const SIZE: VirtualIndex = VirtualIndex(1); pub const ALIGN: VirtualIndex = VirtualIndex(2); -impl<'a, 'tcx> VirtualIndex { +impl<'a, 'tcx: 'a> VirtualIndex { pub fn from_index(index: usize) -> Self { VirtualIndex(index as u64 + 3) } - pub fn get_fn(self, bx: &Builder<'a, 'll, 'tcx>, - llvtable: &'ll Value, - fn_ty: &FnType<'tcx, Ty<'tcx>>) -> &'ll Value { + pub fn get_fn>( + self, + bx: &mut Bx, + llvtable: Bx::Value, + fn_ty: &FnType<'tcx, Ty<'tcx>> + ) -> Bx::Value { // Load the data pointer from the object. debug!("get_fn({:?}, {:?})", llvtable, self); - let llvtable = bx.pointercast(llvtable, fn_ty.llvm_type(bx.cx).ptr_to().ptr_to()); - let ptr_align = bx.tcx().data_layout.pointer_align; - let ptr = bx.load(bx.inbounds_gep(llvtable, &[C_usize(bx.cx, self.0)]), ptr_align); + let llvtable = bx.pointercast( + llvtable, + bx.type_ptr_to(bx.fn_ptr_backend_type(fn_ty)) + ); + let ptr_align = bx.tcx().data_layout.pointer_align.abi; + let gep = bx.inbounds_gep(llvtable, &[bx.const_usize(self.0)]); + let ptr = bx.load(gep, ptr_align); bx.nonnull_metadata(ptr); // Vtable loads are invariant bx.set_invariant_load(ptr); ptr } - pub fn get_usize(self, bx: &Builder<'a, 'll, 'tcx>, llvtable: &'ll Value) -> &'ll Value { + pub fn get_usize>( + self, + bx: &mut Bx, + llvtable: Bx::Value + ) -> Bx::Value { // Load the data pointer from the object. 
debug!("get_int({:?}, {:?})", llvtable, self); - let llvtable = bx.pointercast(llvtable, Type::isize(bx.cx).ptr_to()); - let usize_align = bx.tcx().data_layout.pointer_align; - let ptr = bx.load(bx.inbounds_gep(llvtable, &[C_usize(bx.cx, self.0)]), usize_align); + let llvtable = bx.pointercast(llvtable, bx.type_ptr_to(bx.type_isize())); + let usize_align = bx.tcx().data_layout.pointer_align.abi; + let gep = bx.inbounds_gep(llvtable, &[bx.const_usize(self.0)]); + let ptr = bx.load(gep, usize_align); // Vtable loads are invariant bx.set_invariant_load(ptr); ptr @@ -67,49 +74,49 @@ impl<'a, 'tcx> VirtualIndex { /// The vtables are cached instead of created on every call. /// /// The `trait_ref` encodes the erased self type. Hence if we are -/// making an object `Foo` from a value of type `Foo`, then +/// making an object `Foo` from a value of type `Foo`, then /// `trait_ref` would map `T:Trait`. -pub fn get_vtable( - cx: &CodegenCx<'ll, 'tcx>, +pub fn get_vtable<'tcx, Cx: CodegenMethods<'tcx>>( + cx: &Cx, ty: Ty<'tcx>, trait_ref: ty::PolyExistentialTraitRef<'tcx>, -) -> &'ll Value { - let tcx = cx.tcx; +) -> Cx::Value { + let tcx = cx.tcx(); debug!("get_vtable(ty={:?}, trait_ref={:?})", ty, trait_ref); // Check the cache. - if let Some(&val) = cx.vtables.borrow().get(&(ty, trait_ref)) { + if let Some(&val) = cx.vtables().borrow().get(&(ty, trait_ref)) { return val; } // Not in the cache. Build it. - let nullptr = C_null(Type::i8p(cx)); + let nullptr = cx.const_null(cx.type_i8p()); let methods = tcx.vtable_methods(trait_ref.with_self_ty(tcx, ty)); let methods = methods.iter().cloned().map(|opt_mth| { opt_mth.map_or(nullptr, |(def_id, substs)| { - callee::resolve_and_get_fn(cx, def_id, substs) + callee::resolve_and_get_fn_for_vtable(cx, def_id, substs) }) }); - let (size, align) = cx.size_and_align_of(ty); + let layout = cx.layout_of(ty); // ///////////////////////////////////////////////////////////////////////////////////////////// // If you touch this code, be sure to also make the corresponding changes to // `get_vtable` in rust_mir/interpret/traits.rs // ///////////////////////////////////////////////////////////////////////////////////////////// let components: Vec<_> = [ - callee::get_fn(cx, monomorphize::resolve_drop_in_place(cx.tcx, ty)), - C_usize(cx, size.bytes()), - C_usize(cx, align.abi()) + cx.get_fn(monomorphize::resolve_drop_in_place(cx.tcx(), ty)), + cx.const_usize(layout.size.bytes()), + cx.const_usize(layout.align.abi.bytes()) ].iter().cloned().chain(methods).collect(); - let vtable_const = C_struct(cx, &components, false); - let align = cx.data_layout().pointer_align; - let vtable = consts::addr_of(cx, vtable_const, align, Some("vtable")); + let vtable_const = cx.const_struct(&components, false); + let align = cx.data_layout().pointer_align.abi; + let vtable = cx.static_addr_of(vtable_const, align, Some("vtable")); - debuginfo::create_vtable_metadata(cx, ty, vtable); + cx.create_vtable_metadata(ty, vtable); - cx.vtables.borrow_mut().insert((ty, trait_ref), vtable); + cx.vtables().borrow_mut().insert((ty, trait_ref), vtable); vtable } diff --git a/src/librustc_codegen_llvm/mir/analyze.rs b/src/librustc_codegen_ssa/mir/analyze.rs similarity index 92% rename from src/librustc_codegen_llvm/mir/analyze.rs rename to src/librustc_codegen_ssa/mir/analyze.rs index a63cbe70df..c7e2c76c3e 100644 --- a/src/librustc_codegen_llvm/mir/analyze.rs +++ b/src/librustc_codegen_ssa/mir/analyze.rs @@ -18,11 +18,13 @@ use rustc::mir::{self, Location, TerminatorKind}; use 
rustc::mir::visit::{Visitor, PlaceContext, MutatingUseContext, NonMutatingUseContext}; use rustc::mir::traversal; use rustc::ty; -use rustc::ty::layout::LayoutOf; -use type_of::LayoutLlvmExt; +use rustc::ty::layout::{LayoutOf, HasTyCtxt}; use super::FunctionCx; +use traits::*; -pub fn non_ssa_locals(fx: &FunctionCx<'a, 'll, 'tcx>) -> BitSet { +pub fn non_ssa_locals<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( + fx: &FunctionCx<'a, 'tcx, Bx> +) -> BitSet { let mir = fx.mir; let mut analyzer = LocalAnalyzer::new(fx); @@ -32,10 +34,10 @@ pub fn non_ssa_locals(fx: &FunctionCx<'a, 'll, 'tcx>) -> BitSet { let ty = fx.monomorphize(&ty); debug!("local {} has type {:?}", index, ty); let layout = fx.cx.layout_of(ty); - if layout.is_llvm_immediate() { + if fx.cx.is_backend_immediate(layout) { // These sorts of types are immediates that we can store // in an Value without an alloca. - } else if layout.is_llvm_scalar_pair() { + } else if fx.cx.is_backend_scalar_pair(layout) { // We allow pairs and uses of any of their 2 fields. } else { // These sorts of types require an alloca. Note that @@ -51,8 +53,8 @@ pub fn non_ssa_locals(fx: &FunctionCx<'a, 'll, 'tcx>) -> BitSet { analyzer.non_ssa_locals } -struct LocalAnalyzer<'mir, 'a: 'mir, 'll: 'a, 'tcx: 'll> { - fx: &'mir FunctionCx<'a, 'll, 'tcx>, +struct LocalAnalyzer<'mir, 'a: 'mir, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> { + fx: &'mir FunctionCx<'a, 'tcx, Bx>, dominators: Dominators, non_ssa_locals: BitSet, // The location of the first visited direct assignment to each @@ -60,8 +62,8 @@ struct LocalAnalyzer<'mir, 'a: 'mir, 'll: 'a, 'tcx: 'll> { first_assignment: IndexVec } -impl LocalAnalyzer<'mir, 'a, 'll, 'tcx> { - fn new(fx: &'mir FunctionCx<'a, 'll, 'tcx>) -> Self { +impl> LocalAnalyzer<'mir, 'a, 'tcx, Bx> { + fn new(fx: &'mir FunctionCx<'a, 'tcx, Bx>) -> Self { let invalid_location = mir::BasicBlock::new(fx.mir.basic_blocks().len()).start_location(); let mut analyzer = LocalAnalyzer { @@ -102,7 +104,8 @@ impl LocalAnalyzer<'mir, 'a, 'll, 'tcx> { } } -impl Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'll, 'tcx> { +impl<'mir, 'a: 'mir, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> Visitor<'tcx> + for LocalAnalyzer<'mir, 'a, 'tcx, Bx> { fn visit_assign(&mut self, block: mir::BasicBlock, place: &mir::Place<'tcx>, @@ -141,7 +144,7 @@ impl Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'll, 'tcx> { _ => None, }; if let Some((def_id, args)) = check { - if Some(def_id) == self.fx.cx.tcx.lang_items().box_free_fn() { + if Some(def_id) == self.fx.cx.tcx().lang_items().box_free_fn() { // box_free(x) shares with `drop x` the property that it // is not guaranteed to be statically dominated by the // definition of x, so x must always be in an alloca. @@ -173,21 +176,21 @@ impl Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'll, 'tcx> { _ => false }; if is_consume { - let base_ty = proj.base.ty(self.fx.mir, cx.tcx); + let base_ty = proj.base.ty(self.fx.mir, cx.tcx()); let base_ty = self.fx.monomorphize(&base_ty); // ZSTs don't require any actual memory access. let elem_ty = base_ty - .projection_ty(cx.tcx, &proj.elem) - .to_ty(cx.tcx); + .projection_ty(cx.tcx(), &proj.elem) + .to_ty(cx.tcx()); let elem_ty = self.fx.monomorphize(&elem_ty); if cx.layout_of(elem_ty).is_zst() { return; } if let mir::ProjectionElem::Field(..) 
= proj.elem { - let layout = cx.layout_of(base_ty.to_ty(cx.tcx)); - if layout.is_llvm_immediate() || layout.is_llvm_scalar_pair() { + let layout = cx.layout_of(base_ty.to_ty(cx.tcx())); + if cx.is_backend_immediate(layout) || cx.is_backend_scalar_pair(layout) { // Recurse with the same context, instead of `Projection`, // potentially stopping at non-operand projections, // which would trigger `not_ssa` on locals. @@ -219,7 +222,8 @@ impl Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'll, 'tcx> { self.assign(local, location); } - PlaceContext::NonUse(_) => {} + PlaceContext::NonUse(_) | + PlaceContext::MutatingUse(MutatingUseContext::Retag) => {} PlaceContext::NonMutatingUse(NonMutatingUseContext::Copy) | PlaceContext::NonMutatingUse(NonMutatingUseContext::Move) => { @@ -251,8 +255,8 @@ impl Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'll, 'tcx> { } PlaceContext::MutatingUse(MutatingUseContext::Drop) => { - let ty = mir::Place::Local(local).ty(self.fx.mir, self.fx.cx.tcx); - let ty = self.fx.monomorphize(&ty.to_ty(self.fx.cx.tcx)); + let ty = mir::Place::Local(local).ty(self.fx.mir, self.fx.cx.tcx()); + let ty = self.fx.monomorphize(&ty.to_ty(self.fx.cx.tcx())); // Only need the place if we're actually dropping it. if self.fx.cx.type_needs_drop(ty) { @@ -345,7 +349,7 @@ pub fn cleanup_kinds<'a, 'tcx>(mir: &mir::Mir<'tcx>) -> IndexVec { - result[succ] = CleanupKind::Internal { funclet: funclet }; + result[succ] = CleanupKind::Internal { funclet }; } CleanupKind::Funclet => { if funclet != succ { diff --git a/src/librustc_codegen_llvm/mir/block.rs b/src/librustc_codegen_ssa/mir/block.rs similarity index 67% rename from src/librustc_codegen_llvm/mir/block.rs rename to src/librustc_codegen_ssa/mir/block.rs index 68e3022718..a3bfbc2211 100644 --- a/src/librustc_codegen_llvm/mir/block.rs +++ b/src/librustc_codegen_ssa/mir/block.rs @@ -8,23 +8,20 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
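`get_vtable` in the `meth.rs` hunk above always emits the same layout: slot 0 is the drop-in-place glue, slots 1 and 2 are the size and alignment of the concrete type, and the trait's own methods start at slot 3, which is why `VirtualIndex::from_index` adds 3. A conceptual sketch of that layout as plain data (the `Vtable` struct and helpers are invented for illustration, not compiler types):

```rust
/// An illustrative picture of the constant that `get_vtable` builds.
struct Vtable {
    drop_in_place: fn(*mut ()), // slot 0
    size: usize,                // slot 1
    align: usize,               // slot 2
    methods: Vec<Option<fn()>>, // slots 3.. ; `None` models the null pointer
}

impl Vtable {
    /// Counterpart of `VirtualIndex::from_index(i)`: trait method `i`
    /// occupies vtable slot `i + 3`.
    fn method_slot(i: usize) -> usize {
        i + 3
    }
}

fn demo_drop(_: *mut ()) {}

fn method_zero() {
    println!("method 0 called");
}

fn main() {
    let vtable = Vtable {
        drop_in_place: demo_drop as fn(*mut ()),
        size: 16,
        align: 8,
        methods: vec![Some(method_zero as fn()), None],
    };

    assert_eq!(Vtable::method_slot(0), 3);
    if let Some(m) = vtable.methods[0] {
        m(); // dispatch through the "vtable" slot for method 0
    }
    // Slot 0 (the drop glue) called on a dummy pointer; demo_drop ignores it.
    (vtable.drop_in_place)(std::ptr::null_mut());
    println!("size = {}, align = {}", vtable.size, vtable.align);
}
```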
-use llvm::{self, BasicBlock}; use rustc::middle::lang_items; use rustc::ty::{self, Ty, TypeFoldable}; -use rustc::ty::layout::{self, LayoutOf}; +use rustc::ty::layout::{self, LayoutOf, HasTyCtxt}; use rustc::mir; use rustc::mir::interpret::EvalErrorKind; -use abi::{Abi, ArgType, ArgTypeExt, FnType, FnTypeExt, LlvmType, PassMode}; +use rustc_target::abi::call::{ArgType, FnType, PassMode}; +use rustc_target::spec::abi::Abi; use base; -use callee; -use builder::{Builder, MemFlags}; -use common::{self, C_bool, C_str_slice, C_struct, C_u32, C_uint_big, C_undef}; -use consts; +use MemFlags; +use common::{self, IntPredicate}; use meth; -use monomorphize; -use type_of::LayoutLlvmExt; -use type_::Type; -use value::Value; +use rustc_mir::monomorphize; + +use traits::*; use syntax::symbol::Symbol; use syntax_pos::Pos; @@ -34,8 +31,11 @@ use super::place::PlaceRef; use super::operand::OperandRef; use super::operand::OperandValue::{Pair, Ref, Immediate}; -impl FunctionCx<'a, 'll, 'tcx> { - pub fn codegen_block(&mut self, bb: mir::BasicBlock) { +impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { + pub fn codegen_block( + &mut self, + bb: mir::BasicBlock, + ) { let mut bx = self.build_block(bb); let data = &self.mir[bb]; @@ -48,21 +48,33 @@ impl FunctionCx<'a, 'll, 'tcx> { self.codegen_terminator(bx, bb, data.terminator()); } - fn codegen_terminator(&mut self, - mut bx: Builder<'a, 'll, 'tcx>, - bb: mir::BasicBlock, - terminator: &mir::Terminator<'tcx>) - { + fn codegen_terminator( + &mut self, + mut bx: Bx, + bb: mir::BasicBlock, + terminator: &mir::Terminator<'tcx> + ) { debug!("codegen_terminator: {:?}", terminator); // Create the cleanup bundle, if needed. - let tcx = bx.tcx(); + let tcx = self.cx.tcx(); let span = terminator.source_info.span; let funclet_bb = self.cleanup_kinds[bb].funclet_bb(bb); - let funclet = funclet_bb.and_then(|funclet_bb| self.funclets[funclet_bb].as_ref()); - let cleanup_pad = funclet.map(|lp| lp.cleanuppad()); - let cleanup_bundle = funclet.map(|l| l.bundle()); + // HACK(eddyb) force the right lifetimes, NLL can't figure them out. + fn funclet_closure_factory<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( + funclet_bb: Option + ) -> impl for<'b> Fn( + &'b FunctionCx<'a, 'tcx, Bx>, + ) -> Option<&'b Bx::Funclet> { + move |this| { + match funclet_bb { + Some(funclet_bb) => this.funclets[funclet_bb].as_ref(), + None => None, + } + } + } + let funclet = funclet_closure_factory(funclet_bb); let lltarget = |this: &mut Self, target: mir::BasicBlock| { let lltarget = this.blocks[target]; @@ -90,32 +102,33 @@ impl FunctionCx<'a, 'll, 'tcx> { debug!("llblock: creating cleanup trampoline for {:?}", target); let name = &format!("{:?}_cleanup_trampoline_{:?}", bb, target); - let trampoline = this.new_block(name); - trampoline.cleanup_ret(cleanup_pad.unwrap(), Some(lltarget)); + let mut trampoline = this.new_block(name); + trampoline.cleanup_ret(funclet(this).unwrap(), Some(lltarget)); trampoline.llbb() } else { lltarget } }; - let funclet_br = |this: &mut Self, bx: Builder<'_, 'll, '_>, target: mir::BasicBlock| { - let (lltarget, is_cleanupret) = lltarget(this, target); - if is_cleanupret { - // micro-optimization: generate a `ret` rather than a jump - // to a trampoline. 
- bx.cleanup_ret(cleanup_pad.unwrap(), Some(lltarget)); - } else { - bx.br(lltarget); - } - }; + let funclet_br = + |this: &mut Self, bx: &mut Bx, target: mir::BasicBlock| { + let (lltarget, is_cleanupret) = lltarget(this, target); + if is_cleanupret { + // micro-optimization: generate a `ret` rather than a jump + // to a trampoline. + bx.cleanup_ret(funclet(this).unwrap(), Some(lltarget)); + } else { + bx.br(lltarget); + } + }; let do_call = | this: &mut Self, - bx: Builder<'a, 'll, 'tcx>, + bx: &mut Bx, fn_ty: FnType<'tcx, Ty<'tcx>>, - fn_ptr: &'ll Value, - llargs: &[&'ll Value], - destination: Option<(ReturnDest<'ll, 'tcx>, mir::BasicBlock)>, + fn_ptr: Bx::Value, + llargs: &[Bx::Value], + destination: Option<(ReturnDest<'tcx, Bx::Value>, mir::BasicBlock)>, cleanup: Option | { if let Some(cleanup) = cleanup { @@ -128,27 +141,27 @@ impl FunctionCx<'a, 'll, 'tcx> { &llargs, ret_bx, llblock(this, cleanup), - cleanup_bundle); - fn_ty.apply_attrs_callsite(&bx, invokeret); + funclet(this)); + bx.apply_attrs_callsite(&fn_ty, invokeret); if let Some((ret_dest, target)) = destination { - let ret_bx = this.build_block(target); - this.set_debug_loc(&ret_bx, terminator.source_info); - this.store_return(&ret_bx, ret_dest, &fn_ty.ret, invokeret); + let mut ret_bx = this.build_block(target); + this.set_debug_loc(&mut ret_bx, terminator.source_info); + this.store_return(&mut ret_bx, ret_dest, &fn_ty.ret, invokeret); } } else { - let llret = bx.call(fn_ptr, &llargs, cleanup_bundle); - fn_ty.apply_attrs_callsite(&bx, llret); + let llret = bx.call(fn_ptr, &llargs, funclet(this)); + bx.apply_attrs_callsite(&fn_ty, llret); if this.mir[bb].is_cleanup { // Cleanup is always the cold path. Don't inline // drop glue. Also, when there is a deeply-nested // struct, there are "symmetry" issues that cause // exponential inlining - see issue #41696. 
- llvm::Attribute::NoInline.apply_callsite(llvm::AttributePlace::Function, llret); + bx.do_not_inline(llret); } if let Some((ret_dest, target)) = destination { - this.store_return(&bx, ret_dest, &fn_ty.ret, llret); + this.store_return(bx, ret_dest, &fn_ty.ret, llret); funclet_br(this, bx, target); } else { bx.unreachable(); @@ -156,42 +169,42 @@ impl FunctionCx<'a, 'll, 'tcx> { } }; - self.set_debug_loc(&bx, terminator.source_info); + self.set_debug_loc(&mut bx, terminator.source_info); match terminator.kind { mir::TerminatorKind::Resume => { - if let Some(cleanup_pad) = cleanup_pad { - bx.cleanup_ret(cleanup_pad, None); + if let Some(funclet) = funclet(self) { + bx.cleanup_ret(funclet, None); } else { - let slot = self.get_personality_slot(&bx); - let lp0 = slot.project_field(&bx, 0).load(&bx).immediate(); - let lp1 = slot.project_field(&bx, 1).load(&bx).immediate(); - slot.storage_dead(&bx); + let slot = self.get_personality_slot(&mut bx); + let lp0 = slot.project_field(&mut bx, 0); + let lp0 = bx.load_operand(lp0).immediate(); + let lp1 = slot.project_field(&mut bx, 1); + let lp1 = bx.load_operand(lp1).immediate(); + slot.storage_dead(&mut bx); if !bx.sess().target.target.options.custom_unwind_resume { - let mut lp = C_undef(self.landing_pad_type()); + let mut lp = bx.const_undef(self.landing_pad_type()); lp = bx.insert_value(lp, lp0, 0); lp = bx.insert_value(lp, lp1, 1); bx.resume(lp); } else { - bx.call(bx.cx.eh_unwind_resume(), &[lp0], cleanup_bundle); + bx.call(bx.eh_unwind_resume(), &[lp0], funclet(self)); bx.unreachable(); } } } mir::TerminatorKind::Abort => { - // Call core::intrinsics::abort() - let fnname = bx.cx.get_intrinsic(&("llvm.trap")); - bx.call(fnname, &[], None); + bx.abort(); bx.unreachable(); } mir::TerminatorKind::Goto { target } => { - funclet_br(self, bx, target); + funclet_br(self, &mut bx, target); } mir::TerminatorKind::SwitchInt { ref discr, switch_ty, ref values, ref targets } => { - let discr = self.codegen_operand(&bx, discr); + let discr = self.codegen_operand(&mut bx, discr); if targets.len() == 2 { // If there are two targets, emit br instead of switch let lltrue = llblock(self, targets[0]); @@ -205,9 +218,11 @@ impl FunctionCx<'a, 'll, 'tcx> { bx.cond_br(discr.immediate(), lltrue, llfalse); } } else { - let switch_llty = bx.cx.layout_of(switch_ty).immediate_llvm_type(bx.cx); - let llval = C_uint_big(switch_llty, values[0]); - let cmp = bx.icmp(llvm::IntEQ, discr.immediate(), llval); + let switch_llty = bx.immediate_backend_type( + bx.layout_of(switch_ty) + ); + let llval = bx.const_uint_big(switch_llty, values[0]); + let cmp = bx.icmp(IntPredicate::IntEQ, discr.immediate(), llval); bx.cond_br(cmp, lltrue, llfalse); } } else { @@ -215,9 +230,11 @@ impl FunctionCx<'a, 'll, 'tcx> { let switch = bx.switch(discr.immediate(), llblock(self, *otherwise), values.len()); - let switch_llty = bx.cx.layout_of(switch_ty).immediate_llvm_type(bx.cx); + let switch_llty = bx.immediate_backend_type( + bx.layout_of(switch_ty) + ); for (&value, target) in values.iter().zip(targets) { - let llval = C_uint_big(switch_llty, value); + let llval = bx.const_uint_big(switch_llty, value); let llbb = llblock(self, *target); bx.add_case(switch, llval, llbb) } @@ -232,11 +249,12 @@ impl FunctionCx<'a, 'll, 'tcx> { } PassMode::Direct(_) | PassMode::Pair(..) 
=> { - let op = self.codegen_consume(&bx, &mir::Place::Local(mir::RETURN_PLACE)); + let op = + self.codegen_consume(&mut bx, &mir::Place::Local(mir::RETURN_PLACE)); if let Ref(llval, _, align) = op.val { bx.load(llval, align) } else { - op.immediate_or_packed_pair(&bx) + op.immediate_or_packed_pair(&mut bx) } } @@ -254,19 +272,21 @@ impl FunctionCx<'a, 'll, 'tcx> { }; let llslot = match op.val { Immediate(_) | Pair(..) => { - let scratch = PlaceRef::alloca(&bx, self.fn_ty.ret.layout, "ret"); - op.val.store(&bx, scratch); + let scratch = + PlaceRef::alloca(&mut bx, self.fn_ty.ret.layout, "ret"); + op.val.store(&mut bx, scratch); scratch.llval } Ref(llval, _, align) => { - assert_eq!(align.abi(), op.layout.align.abi(), + assert_eq!(align, op.layout.align.abi, "return place is unaligned!"); llval } }; - bx.load( - bx.pointercast(llslot, cast_ty.llvm_type(bx.cx).ptr_to()), - self.fn_ty.ret.layout.align) + let addr = bx.pointercast(llslot, bx.type_ptr_to( + bx.cast_backend_type(&cast_ty) + )); + bx.load(addr, self.fn_ty.ret.layout.align.abi) } }; bx.ret(llval); @@ -279,15 +299,15 @@ impl FunctionCx<'a, 'll, 'tcx> { mir::TerminatorKind::Drop { ref location, target, unwind } => { let ty = location.ty(self.mir, bx.tcx()).to_ty(bx.tcx()); let ty = self.monomorphize(&ty); - let drop_fn = monomorphize::resolve_drop_in_place(bx.cx.tcx, ty); + let drop_fn = monomorphize::resolve_drop_in_place(bx.tcx(), ty); if let ty::InstanceDef::DropGlue(_, None) = drop_fn.def { // we don't actually need to drop anything. - funclet_br(self, bx, target); + funclet_br(self, &mut bx, target); return } - let place = self.codegen_place(&bx, location); + let place = self.codegen_place(&mut bx, location); let (args1, args2); let mut args = if let Some(llextra) = place.llextra { args2 = [place.llval, llextra]; @@ -298,30 +318,29 @@ impl FunctionCx<'a, 'll, 'tcx> { }; let (drop_fn, fn_ty) = match ty.sty { ty::Dynamic(..) => { - let fn_ty = drop_fn.ty(bx.cx.tcx); - let sig = common::ty_fn_sig(bx.cx, fn_ty); - let sig = bx.tcx().normalize_erasing_late_bound_regions( + let sig = drop_fn.fn_sig(tcx); + let sig = tcx.normalize_erasing_late_bound_regions( ty::ParamEnv::reveal_all(), &sig, ); - let fn_ty = FnType::new_vtable(bx.cx, sig, &[]); + let fn_ty = bx.new_vtable(sig, &[]); let vtable = args[1]; args = &args[..1]; - (meth::DESTRUCTOR.get_fn(&bx, vtable, &fn_ty), fn_ty) + (meth::DESTRUCTOR.get_fn(&mut bx, vtable, &fn_ty), fn_ty) } _ => { - (callee::get_fn(bx.cx, drop_fn), - FnType::of_instance(bx.cx, &drop_fn)) + (bx.get_fn(drop_fn), + bx.fn_type_of_instance(&drop_fn)) } }; - do_call(self, bx, fn_ty, drop_fn, args, + do_call(self, &mut bx, fn_ty, drop_fn, args, Some((ReturnDest::Nothing, target)), unwind); } mir::TerminatorKind::Assert { ref cond, expected, ref msg, target, cleanup } => { - let cond = self.codegen_operand(&bx, cond).immediate(); - let mut const_cond = common::const_to_opt_u128(cond, false).map(|c| c == 1); + let cond = self.codegen_operand(&mut bx, cond).immediate(); + let mut const_cond = bx.const_to_opt_u128(cond, false).map(|c| c == 1); // This case can currently arise only from functions marked // with #[rustc_inherit_overflow_checks] and inlined from @@ -330,7 +349,7 @@ impl FunctionCx<'a, 'll, 'tcx> { // NOTE: Unlike binops, negation doesn't have its own // checked operation, just a comparison with the minimum // value, so we have to check for the assert message. 
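As the comment above notes, negation has no dedicated checked operation because it can only overflow when applied to a signed type's minimum value; when overflow checks are disabled, this assert is therefore treated as always succeeding. The same fact demonstrated in ordinary Rust:

```rust
fn main() {
    // Negating a signed integer can only overflow for the type's minimum
    // value, which has no positive counterpart in two's complement.
    assert_eq!(i32::MIN.checked_neg(), None);
    assert_eq!((-5_i32).checked_neg(), Some(5));

    // With wrapping semantics (the fallback when overflow checks are off),
    // negating MIN yields MIN again.
    assert_eq!(i32::MIN.wrapping_neg(), i32::MIN);
}
```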
- if !bx.cx.check_overflow { + if !bx.check_overflow() { if let mir::interpret::EvalErrorKind::OverflowNeg = *msg { const_cond = Some(expected); } @@ -338,13 +357,12 @@ impl FunctionCx<'a, 'll, 'tcx> { // Don't codegen the panic block if success if known. if const_cond == Some(expected) { - funclet_br(self, bx, target); + funclet_br(self, &mut bx, target); return; } // Pass the condition through llvm.expect for branch hinting. - let expect = bx.cx.get_intrinsic(&"llvm.expect.i1"); - let cond = bx.call(expect, &[cond, C_bool(bx.cx, expected)], None); + let cond = bx.expect(cond, expected); // Create the failure block and the conditional branch to it. let lltarget = llblock(self, target); @@ -357,17 +375,17 @@ impl FunctionCx<'a, 'll, 'tcx> { // After this point, bx is the block for the call to panic. bx = panic_block; - self.set_debug_loc(&bx, terminator.source_info); + self.set_debug_loc(&mut bx, terminator.source_info); // Get the location information. let loc = bx.sess().source_map().lookup_char_pos(span.lo()); let filename = Symbol::intern(&loc.file.name.to_string()).as_str(); - let filename = C_str_slice(bx.cx, filename); - let line = C_u32(bx.cx, loc.line as u32); - let col = C_u32(bx.cx, loc.col.to_usize() as u32 + 1); - let align = tcx.data_layout.aggregate_align - .max(tcx.data_layout.i32_align) - .max(tcx.data_layout.pointer_align); + let filename = bx.const_str_slice(filename); + let line = bx.const_u32(loc.line as u32); + let col = bx.const_u32(loc.col.to_usize() as u32 + 1); + let align = tcx.data_layout.aggregate_align.abi + .max(tcx.data_layout.i32_align.abi) + .max(tcx.data_layout.pointer_align.abi); // Put together the arguments to the panic entry point. let (lang_item, args) = match *msg { @@ -375,25 +393,28 @@ impl FunctionCx<'a, 'll, 'tcx> { let len = self.codegen_operand(&mut bx, len).immediate(); let index = self.codegen_operand(&mut bx, index).immediate(); - let file_line_col = C_struct(bx.cx, &[filename, line, col], false); - let file_line_col = consts::addr_of(bx.cx, - file_line_col, - align, - Some("panic_bounds_check_loc")); + let file_line_col = bx.const_struct(&[filename, line, col], false); + let file_line_col = bx.static_addr_of( + file_line_col, + align, + Some("panic_bounds_check_loc") + ); (lang_items::PanicBoundsCheckFnLangItem, vec![file_line_col, index, len]) } _ => { let str = msg.description(); let msg_str = Symbol::intern(str).as_str(); - let msg_str = C_str_slice(bx.cx, msg_str); - let msg_file_line_col = C_struct(bx.cx, - &[msg_str, filename, line, col], - false); - let msg_file_line_col = consts::addr_of(bx.cx, - msg_file_line_col, - align, - Some("panic_loc")); + let msg_str = bx.const_str_slice(msg_str); + let msg_file_line_col = bx.const_struct( + &[msg_str, filename, line, col], + false + ); + let msg_file_line_col = bx.static_addr_of( + msg_file_line_col, + align, + Some("panic_loc") + ); (lang_items::PanicFnLangItem, vec![msg_file_line_col]) } @@ -402,11 +423,11 @@ impl FunctionCx<'a, 'll, 'tcx> { // Obtain the panic entry point. let def_id = common::langcall(bx.tcx(), Some(span), "", lang_item); let instance = ty::Instance::mono(bx.tcx(), def_id); - let fn_ty = FnType::of_instance(bx.cx, &instance); - let llfn = callee::get_fn(bx.cx, instance); + let fn_ty = bx.fn_type_of_instance(&instance); + let llfn = bx.get_fn(instance); // Codegen the actual panic invoke/call. - do_call(self, bx, fn_ty, llfn, &args, None, cleanup); + do_call(self, &mut bx, fn_ty, llfn, &args, None, cleanup); } mir::TerminatorKind::DropAndReplace { .. 
} => { @@ -421,11 +442,11 @@ impl FunctionCx<'a, 'll, 'tcx> { from_hir_call: _ } => { // Create the callee. This is a fn ptr or zero-sized and hence a kind of scalar. - let callee = self.codegen_operand(&bx, func); + let callee = self.codegen_operand(&mut bx, func); let (instance, mut llfn) = match callee.layout.ty.sty { ty::FnDef(def_id, substs) => { - (Some(ty::Instance::resolve(bx.cx.tcx, + (Some(ty::Instance::resolve(bx.tcx(), ty::ParamEnv::reveal_all(), def_id, substs).unwrap()), @@ -455,8 +476,8 @@ impl FunctionCx<'a, 'll, 'tcx> { if intrinsic == Some("transmute") { if let Some(destination_ref) = destination.as_ref() { let &(ref dest, target) = destination_ref; - self.codegen_transmute(&bx, &args[0], dest); - funclet_br(self, bx, target); + self.codegen_transmute(&mut bx, &args[0], dest); + funclet_br(self, &mut bx, target); } else { // If we are trying to transmute to an uninhabited type, // it is likely there is no allotted destination. In fact, @@ -464,7 +485,7 @@ impl FunctionCx<'a, 'll, 'tcx> { // we can do what we like. Here, we declare that transmuting // into an uninhabited type is impossible, so anything following // it must be unreachable. - assert_eq!(bx.cx.layout_of(sig.output()).abi, layout::Abi::Uninhabited); + assert_eq!(bx.layout_of(sig.output()).abi, layout::Abi::Uninhabited); bx.unreachable(); } return; @@ -478,15 +499,15 @@ impl FunctionCx<'a, 'll, 'tcx> { let fn_ty = match def { Some(ty::InstanceDef::Virtual(..)) => { - FnType::new_vtable(bx.cx, sig, &extra_args) + bx.new_vtable(sig, &extra_args) } Some(ty::InstanceDef::DropGlue(_, None)) => { // empty drop glue - a nop. let &(_, target) = destination.as_ref().unwrap(); - funclet_br(self, bx, target); + funclet_br(self, &mut bx, target); return; } - _ => FnType::new(bx.cx, sig, &extra_args) + _ => bx.new_fn_type(sig, &extra_args) }; // emit a panic instead of instantiating an uninhabited type @@ -495,12 +516,12 @@ impl FunctionCx<'a, 'll, 'tcx> { { let loc = bx.sess().source_map().lookup_char_pos(span.lo()); let filename = Symbol::intern(&loc.file.name.to_string()).as_str(); - let filename = C_str_slice(bx.cx, filename); - let line = C_u32(bx.cx, loc.line as u32); - let col = C_u32(bx.cx, loc.col.to_usize() as u32 + 1); - let align = tcx.data_layout.aggregate_align - .max(tcx.data_layout.i32_align) - .max(tcx.data_layout.pointer_align); + let filename = bx.const_str_slice(filename); + let line = bx.const_u32(loc.line as u32); + let col = bx.const_u32(loc.col.to_usize() as u32 + 1); + let align = tcx.data_layout.aggregate_align.abi + .max(tcx.data_layout.i32_align.abi) + .max(tcx.data_layout.pointer_align.abi); let str = format!( "Attempted to instantiate uninhabited type {} using mem::{}", @@ -508,26 +529,28 @@ impl FunctionCx<'a, 'll, 'tcx> { if intrinsic == Some("init") { "zeroed" } else { "uninitialized" } ); let msg_str = Symbol::intern(&str).as_str(); - let msg_str = C_str_slice(bx.cx, msg_str); - let msg_file_line_col = C_struct(bx.cx, - &[msg_str, filename, line, col], - false); - let msg_file_line_col = consts::addr_of(bx.cx, - msg_file_line_col, - align, - Some("panic_loc")); + let msg_str = bx.const_str_slice(msg_str); + let msg_file_line_col = bx.const_struct( + &[msg_str, filename, line, col], + false, + ); + let msg_file_line_col = bx.static_addr_of( + msg_file_line_col, + align, + Some("panic_loc"), + ); // Obtain the panic entry point. 
let def_id = common::langcall(bx.tcx(), Some(span), "", lang_items::PanicFnLangItem); let instance = ty::Instance::mono(bx.tcx(), def_id); - let fn_ty = FnType::of_instance(bx.cx, &instance); - let llfn = callee::get_fn(bx.cx, instance); + let fn_ty = bx.fn_type_of_instance(&instance); + let llfn = bx.get_fn(instance); // Codegen the actual panic invoke/call. do_call( self, - bx, + &mut bx, fn_ty, llfn, &[msg_file_line_col], @@ -544,19 +567,17 @@ impl FunctionCx<'a, 'll, 'tcx> { // Prepare the return value destination let ret_dest = if let Some((ref dest, _)) = *destination { let is_intrinsic = intrinsic.is_some(); - self.make_return_dest(&bx, dest, &fn_ty.ret, &mut llargs, + self.make_return_dest(&mut bx, dest, &fn_ty.ret, &mut llargs, is_intrinsic) } else { ReturnDest::Nothing }; if intrinsic.is_some() && intrinsic != Some("drop_in_place") { - use intrinsic::codegen_intrinsic_call; - let dest = match ret_dest { _ if fn_ty.ret.is_indirect() => llargs[0], ReturnDest::Nothing => { - C_undef(fn_ty.ret.memory_ty(bx.cx).ptr_to()) + bx.const_undef(bx.type_ptr_to(bx.memory_ty(&fn_ty.ret))) } ReturnDest::IndirectOperand(dst, _) | ReturnDest::Store(dst) => dst.llval, @@ -590,7 +611,7 @@ impl FunctionCx<'a, 'll, 'tcx> { ); return OperandRef { val: Immediate(llval), - layout: bx.cx.layout_of(ty), + layout: bx.layout_of(ty), }; }, @@ -608,26 +629,26 @@ impl FunctionCx<'a, 'll, 'tcx> { ); return OperandRef { val: Immediate(llval), - layout: bx.cx.layout_of(ty) + layout: bx.layout_of(ty) }; } } } - self.codegen_operand(&bx, arg) + self.codegen_operand(&mut bx, arg) }).collect(); - let callee_ty = instance.as_ref().unwrap().ty(bx.cx.tcx); - codegen_intrinsic_call(&bx, callee_ty, &fn_ty, &args, dest, - terminator.source_info.span); + let callee_ty = instance.as_ref().unwrap().ty(bx.tcx()); + bx.codegen_intrinsic_call(callee_ty, &fn_ty, &args, dest, + terminator.source_info.span); if let ReturnDest::IndirectOperand(dst, _) = ret_dest { - self.store_return(&bx, ret_dest, &fn_ty.ret, dst.llval); + self.store_return(&mut bx, ret_dest, &fn_ty.ret, dst.llval); } if let Some((_, target)) = *destination { - funclet_br(self, bx, target); + funclet_br(self, &mut bx, target); } else { bx.unreachable(); } @@ -643,14 +664,54 @@ impl FunctionCx<'a, 'll, 'tcx> { (&args[..], None) }; - for (i, arg) in first_args.iter().enumerate() { - let mut op = self.codegen_operand(&bx, arg); + 'make_args: for (i, arg) in first_args.iter().enumerate() { + let mut op = self.codegen_operand(&mut bx, arg); + if let (0, Some(ty::InstanceDef::Virtual(_, idx))) = (i, def) { - if let Pair(data_ptr, meta) = op.val { + if let Pair(..) = op.val { + // In the case of Rc, we need to explicitly pass a + // *mut RcBox with a Scalar (not ScalarPair) ABI. This is a hack + // that is understood elsewhere in the compiler as a method on + // `dyn Trait`. + // To get a `*mut RcBox`, we just keep unwrapping newtypes until + // we get a value of a built-in pointer type + 'descend_newtypes: while !op.layout.ty.is_unsafe_ptr() + && !op.layout.ty.is_region_ptr() + { + 'iter_fields: for i in 0..op.layout.fields.count() { + let field = op.extract_field(&mut bx, i); + if !field.layout.is_zst() { + // we found the one non-zero-sized field that is allowed + // now find *its* non-zero-sized field, or stop if it's a + // pointer + op = field; + continue 'descend_newtypes + } + } + + span_bug!(span, "receiver has no non-zero-sized fields {:?}", op); + } + + // now that we have `*dyn Trait` or `&dyn Trait`, split it up into its + // data pointer and vtable. 
Look up the method in the vtable, and pass + // the data pointer as the first argument + match op.val { + Pair(data_ptr, meta) => { + llfn = Some(meth::VirtualIndex::from_index(idx) + .get_fn(&mut bx, meta, &fn_ty)); + llargs.push(data_ptr); + continue 'make_args + } + other => bug!("expected a Pair, got {:?}", other) + } + } else if let Ref(data_ptr, Some(meta), _) = op.val { + // by-value dynamic dispatch llfn = Some(meth::VirtualIndex::from_index(idx) - .get_fn(&bx, meta, &fn_ty)); + .get_fn(&mut bx, meta, &fn_ty)); llargs.push(data_ptr); continue; + } else { + span_bug!(span, "can't codegen a virtual call on {:?}", op); } } @@ -659,27 +720,27 @@ impl FunctionCx<'a, 'll, 'tcx> { match (arg, op.val) { (&mir::Operand::Copy(_), Ref(_, None, _)) | (&mir::Operand::Constant(_), Ref(_, None, _)) => { - let tmp = PlaceRef::alloca(&bx, op.layout, "const"); - op.val.store(&bx, tmp); + let tmp = PlaceRef::alloca(&mut bx, op.layout, "const"); + op.val.store(&mut bx, tmp); op.val = Ref(tmp.llval, None, tmp.align); } _ => {} } - self.codegen_argument(&bx, op, &mut llargs, &fn_ty.args[i]); + self.codegen_argument(&mut bx, op, &mut llargs, &fn_ty.args[i]); } if let Some(tup) = untuple { - self.codegen_arguments_untupled(&bx, tup, &mut llargs, + self.codegen_arguments_untupled(&mut bx, tup, &mut llargs, &fn_ty.args[first_args.len()..]) } let fn_ptr = match (llfn, instance) { (Some(llfn), _) => llfn, - (None, Some(instance)) => callee::get_fn(bx.cx, instance), + (None, Some(instance)) => bx.get_fn(instance), _ => span_bug!(span, "no llfn for call"), }; - do_call(self, bx, fn_ty, fn_ptr, &llargs, + do_call(self, &mut bx, fn_ty, fn_ptr, &llargs, destination.as_ref().map(|&(_, target)| (ret_dest, target)), cleanup); } @@ -690,14 +751,16 @@ impl FunctionCx<'a, 'll, 'tcx> { } } - fn codegen_argument(&mut self, - bx: &Builder<'a, 'll, 'tcx>, - op: OperandRef<'ll, 'tcx>, - llargs: &mut Vec<&'ll Value>, - arg: &ArgType<'tcx, Ty<'tcx>>) { + fn codegen_argument( + &mut self, + bx: &mut Bx, + op: OperandRef<'tcx, Bx::Value>, + llargs: &mut Vec, + arg: &ArgType<'tcx, Ty<'tcx>> + ) { // Fill padding with undef value, where applicable. if let Some(ty) = arg.pad { - llargs.push(C_undef(ty.llvm_type(bx.cx))); + llargs.push(bx.const_undef(bx.reg_backend_type(&ty))) } if arg.is_ignore() { @@ -734,18 +797,19 @@ impl FunctionCx<'a, 'll, 'tcx> { (scratch.llval, scratch.align, true) } _ => { - (op.immediate_or_packed_pair(bx), arg.layout.align, false) + (op.immediate_or_packed_pair(bx), arg.layout.align.abi, false) } } } Ref(llval, _, align) => { - if arg.is_indirect() && align.abi() < arg.layout.align.abi() { + if arg.is_indirect() && align < arg.layout.align.abi { // `foo(packed.large_field)`. We can't pass the (unaligned) field directly. I // think that ATM (Rust 1.16) we only pass temporaries, but we shouldn't // have scary latent bugs around. let scratch = PlaceRef::alloca(bx, arg.layout, "arg"); - base::memcpy_ty(bx, scratch.llval, llval, op.layout, align, MemFlags::empty()); + base::memcpy_ty(bx, scratch.llval, scratch.align, llval, align, + op.layout, MemFlags::empty()); (scratch.llval, scratch.align, true) } else { (llval, align, true) @@ -756,8 +820,10 @@ impl FunctionCx<'a, 'll, 'tcx> { if by_ref && !arg.is_indirect() { // Have to load the argument, maybe while casting it. 
if let PassMode::Cast(ty) = arg.mode { - llval = bx.load(bx.pointercast(llval, ty.llvm_type(bx.cx).ptr_to()), - align.min(arg.layout.align)); + let addr = bx.pointercast(llval, bx.type_ptr_to( + bx.cast_backend_type(&ty)) + ); + llval = bx.load(addr, align.min(arg.layout.align.abi)); } else { // We can't use `PlaceRef::load` here because the argument // may have a type we don't treat as immediate, but the ABI @@ -778,11 +844,13 @@ impl FunctionCx<'a, 'll, 'tcx> { llargs.push(llval); } - fn codegen_arguments_untupled(&mut self, - bx: &Builder<'a, 'll, 'tcx>, - operand: &mir::Operand<'tcx>, - llargs: &mut Vec<&'ll Value>, - args: &[ArgType<'tcx, Ty<'tcx>>]) { + fn codegen_arguments_untupled( + &mut self, + bx: &mut Bx, + operand: &mir::Operand<'tcx>, + llargs: &mut Vec, + args: &[ArgType<'tcx, Ty<'tcx>>] + ) { let tuple = self.codegen_operand(bx, operand); // Handle both by-ref and immediate tuples. @@ -790,7 +858,8 @@ impl FunctionCx<'a, 'll, 'tcx> { let tuple_ptr = PlaceRef::new_sized(llval, tuple.layout, align); for i in 0..tuple.layout.fields.count() { let field_ptr = tuple_ptr.project_field(bx, i); - self.codegen_argument(bx, field_ptr.load(bx), llargs, &args[i]); + let field = bx.load_operand(field_ptr); + self.codegen_argument(bx, field, llargs, &args[i]); } } else if let Ref(_, Some(_), _) = tuple.val { bug!("closure arguments must be sized") @@ -803,14 +872,17 @@ impl FunctionCx<'a, 'll, 'tcx> { } } - fn get_personality_slot(&mut self, bx: &Builder<'a, 'll, 'tcx>) -> PlaceRef<'ll, 'tcx> { - let cx = bx.cx; + fn get_personality_slot( + &mut self, + bx: &mut Bx + ) -> PlaceRef<'tcx, Bx::Value> { + let cx = bx.cx(); if let Some(slot) = self.personality_slot { slot } else { - let layout = cx.layout_of(cx.tcx.intern_tup(&[ - cx.tcx.mk_mut_ptr(cx.tcx.types.u8), - cx.tcx.types.i32 + let layout = cx.layout_of(cx.tcx().intern_tup(&[ + cx.tcx().mk_mut_ptr(cx.tcx().types.u8), + cx.tcx().types.i32 ])); let slot = PlaceRef::alloca(bx, layout, "personalityslot"); self.personality_slot = Some(slot); @@ -821,7 +893,10 @@ impl FunctionCx<'a, 'll, 'tcx> { /// Return the landingpad wrapper around the given basic block /// /// No-op in MSVC SEH scheme. 
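// Illustrative sketch (simplified; not rustc's API): a landing pad yields an
// (exception object pointer, type selector) pair, which is why the personality
// slot above is laid out as the tuple `(*mut u8, i32)` and why
// `landing_pad_type` below is a two-field struct.
#[repr(C)]
struct LandingPadValue {
    exception_object: *mut u8, // extract_value(lp, 0)
    type_selector: i32,        // extract_value(lp, 1)
}

fn main() {
    let lp = LandingPadValue {
        exception_object: std::ptr::null_mut(),
        type_selector: 1,
    };
    println!("{:p} {}", lp.exception_object, lp.type_selector);
}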
- fn landing_pad_to(&mut self, target_bb: mir::BasicBlock) -> &'ll BasicBlock { + fn landing_pad_to( + &mut self, + target_bb: mir::BasicBlock + ) -> Bx::BasicBlock { if let Some(block) = self.landing_pads[target_bb] { return block; } @@ -832,54 +907,65 @@ impl FunctionCx<'a, 'll, 'tcx> { landing_pad } - fn landing_pad_uncached(&mut self, target_bb: &'ll BasicBlock) -> &'ll BasicBlock { + fn landing_pad_uncached( + &mut self, + target_bb: Bx::BasicBlock + ) -> Bx::BasicBlock { if base::wants_msvc_seh(self.cx.sess()) { span_bug!(self.mir.span, "landing pad was not inserted?") } - let bx = self.new_block("cleanup"); + let mut bx = self.new_block("cleanup"); let llpersonality = self.cx.eh_personality(); let llretty = self.landing_pad_type(); let lp = bx.landing_pad(llretty, llpersonality, 1); bx.set_cleanup(lp); - let slot = self.get_personality_slot(&bx); - slot.storage_live(&bx); - Pair(bx.extract_value(lp, 0), bx.extract_value(lp, 1)).store(&bx, slot); + let slot = self.get_personality_slot(&mut bx); + slot.storage_live(&mut bx); + Pair(bx.extract_value(lp, 0), bx.extract_value(lp, 1)).store(&mut bx, slot); bx.br(target_bb); bx.llbb() } - fn landing_pad_type(&self) -> &'ll Type { + fn landing_pad_type(&self) -> Bx::Type { let cx = self.cx; - Type::struct_(cx, &[Type::i8p(cx), Type::i32(cx)], false) + cx.type_struct(&[cx.type_i8p(), cx.type_i32()], false) } - fn unreachable_block(&mut self) -> &'ll BasicBlock { + fn unreachable_block( + &mut self + ) -> Bx::BasicBlock { self.unreachable_block.unwrap_or_else(|| { - let bl = self.new_block("unreachable"); - bl.unreachable(); - self.unreachable_block = Some(bl.llbb()); - bl.llbb() + let mut bx = self.new_block("unreachable"); + bx.unreachable(); + self.unreachable_block = Some(bx.llbb()); + bx.llbb() }) } - pub fn new_block(&self, name: &str) -> Builder<'a, 'll, 'tcx> { - Builder::new_block(self.cx, self.llfn, name) + pub fn new_block(&self, name: &str) -> Bx { + Bx::new_block(self.cx, self.llfn, name) } - pub fn build_block(&self, bb: mir::BasicBlock) -> Builder<'a, 'll, 'tcx> { - let bx = Builder::with_cx(self.cx); + pub fn build_block( + &self, + bb: mir::BasicBlock + ) -> Bx { + let mut bx = Bx::with_cx(self.cx); bx.position_at_end(self.blocks[bb]); bx } - fn make_return_dest(&mut self, bx: &Builder<'a, 'll, 'tcx>, - dest: &mir::Place<'tcx>, fn_ret: &ArgType<'tcx, Ty<'tcx>>, - llargs: &mut Vec<&'ll Value>, is_intrinsic: bool) - -> ReturnDest<'ll, 'tcx> { + fn make_return_dest( + &mut self, + bx: &mut Bx, + dest: &mir::Place<'tcx>, + fn_ret: &ArgType<'tcx, Ty<'tcx>>, + llargs: &mut Vec, is_intrinsic: bool + ) -> ReturnDest<'tcx, Bx::Value> { // If the return is ignored, we can just return a do-nothing ReturnDest if fn_ret.is_ignore() { return ReturnDest::Nothing; @@ -917,7 +1003,7 @@ impl FunctionCx<'a, 'll, 'tcx> { self.codegen_place(bx, dest) }; if fn_ret.is_indirect() { - if dest.align.abi() < dest.layout.align.abi() { + if dest.align < dest.layout.align.abi { // Currently, MIR code generation does not create calls // that store directly to fields of packed structs (in // fact, the calls it creates write only to temps), @@ -933,20 +1019,23 @@ impl FunctionCx<'a, 'll, 'tcx> { } } - fn codegen_transmute(&mut self, bx: &Builder<'a, 'll, 'tcx>, - src: &mir::Operand<'tcx>, - dst: &mir::Place<'tcx>) { + fn codegen_transmute( + &mut self, + bx: &mut Bx, + src: &mir::Operand<'tcx>, + dst: &mir::Place<'tcx> + ) { if let mir::Place::Local(index) = *dst { match self.locals[index] { LocalRef::Place(place) => self.codegen_transmute_into(bx, src, 
place), LocalRef::UnsizedPlace(_) => bug!("transmute must not involve unsized locals"), LocalRef::Operand(None) => { - let dst_layout = bx.cx.layout_of(self.monomorphized_place_ty(dst)); + let dst_layout = bx.layout_of(self.monomorphized_place_ty(dst)); assert!(!dst_layout.ty.has_erasable_regions()); let place = PlaceRef::alloca(bx, dst_layout, "transmute_temp"); place.storage_live(bx); self.codegen_transmute_into(bx, src, place); - let op = place.load(bx); + let op = bx.load_operand(place); place.storage_dead(bx); self.locals[index] = LocalRef::Operand(Some(op)); } @@ -961,30 +1050,35 @@ impl FunctionCx<'a, 'll, 'tcx> { } } - fn codegen_transmute_into(&mut self, bx: &Builder<'a, 'll, 'tcx>, - src: &mir::Operand<'tcx>, - dst: PlaceRef<'ll, 'tcx>) { + fn codegen_transmute_into( + &mut self, + bx: &mut Bx, + src: &mir::Operand<'tcx>, + dst: PlaceRef<'tcx, Bx::Value> + ) { let src = self.codegen_operand(bx, src); - let llty = src.layout.llvm_type(bx.cx); - let cast_ptr = bx.pointercast(dst.llval, llty.ptr_to()); - let align = src.layout.align.min(dst.layout.align); + let llty = bx.backend_type(src.layout); + let cast_ptr = bx.pointercast(dst.llval, bx.type_ptr_to(llty)); + let align = src.layout.align.abi.min(dst.align); src.val.store(bx, PlaceRef::new_sized(cast_ptr, src.layout, align)); } // Stores the return value of a function call into it's final location. - fn store_return(&mut self, - bx: &Builder<'a, 'll, 'tcx>, - dest: ReturnDest<'ll, 'tcx>, - ret_ty: &ArgType<'tcx, Ty<'tcx>>, - llval: &'ll Value) { + fn store_return( + &mut self, + bx: &mut Bx, + dest: ReturnDest<'tcx, Bx::Value>, + ret_ty: &ArgType<'tcx, Ty<'tcx>>, + llval: Bx::Value + ) { use self::ReturnDest::*; match dest { Nothing => (), - Store(dst) => ret_ty.store(bx, llval, dst), + Store(dst) => bx.store_arg_ty(&ret_ty, llval, dst), IndirectOperand(tmp, index) => { - let op = tmp.load(bx); + let op = bx.load_operand(tmp); tmp.storage_dead(bx); self.locals[index] = LocalRef::Operand(Some(op)); } @@ -993,8 +1087,8 @@ impl FunctionCx<'a, 'll, 'tcx> { let op = if let PassMode::Cast(_) = ret_ty.mode { let tmp = PlaceRef::alloca(bx, ret_ty.layout, "tmp_ret"); tmp.storage_live(bx); - ret_ty.store(bx, llval, tmp); - let op = tmp.load(bx); + bx.store_arg_ty(&ret_ty, llval, tmp); + let op = bx.load_operand(tmp); tmp.storage_dead(bx); op } else { @@ -1006,13 +1100,13 @@ impl FunctionCx<'a, 'll, 'tcx> { } } -enum ReturnDest<'ll, 'tcx> { +enum ReturnDest<'tcx, V> { // Do nothing, the return value is indirect or ignored Nothing, // Store the return value to the pointer - Store(PlaceRef<'ll, 'tcx>), + Store(PlaceRef<'tcx, V>), // Stores an indirect return value to an operand local place - IndirectOperand(PlaceRef<'ll, 'tcx>, mir::Local), + IndirectOperand(PlaceRef<'tcx, V>, mir::Local), // Stores a direct return value to an operand local place DirectOperand(mir::Local) } diff --git a/src/librustc_codegen_ssa/mir/constant.rs b/src/librustc_codegen_ssa/mir/constant.rs new file mode 100644 index 0000000000..c03fff7806 --- /dev/null +++ b/src/librustc_codegen_ssa/mir/constant.rs @@ -0,0 +1,105 @@ +// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
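// Illustrative sketch (stand-alone): `codegen_transmute_into` above reinterprets
// the destination through a pointer to the *source* layout and stores at the
// smaller of the two alignments; `transmute_copy` models the same
// "same size, reinterpret the bytes" contract at the source level.
use std::mem;

#[derive(Debug, PartialEq)]
struct Wrapper(u32);

fn main() {
    assert_eq!(mem::size_of::<u32>(), mem::size_of::<Wrapper>());
    let src: u32 = 7;
    let dst: Wrapper = unsafe { mem::transmute_copy(&src) };
    assert_eq!(dst, Wrapper(7));
}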
+ +use rustc::mir::interpret::ErrorHandled; +use rustc_mir::const_eval::const_field; +use rustc::mir; +use rustc_data_structures::indexed_vec::Idx; +use rustc::mir::interpret::{GlobalId, ConstValue}; +use rustc::ty::{self, Ty}; +use rustc::ty::layout; +use syntax::source_map::Span; +use traits::*; + +use super::FunctionCx; + +impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { + fn fully_evaluate( + &mut self, + bx: &Bx, + constant: &'tcx ty::Const<'tcx>, + ) -> Result<&'tcx ty::Const<'tcx>, ErrorHandled> { + match constant.val { + ConstValue::Unevaluated(def_id, ref substs) => { + let tcx = bx.tcx(); + let param_env = ty::ParamEnv::reveal_all(); + let instance = ty::Instance::resolve(tcx, param_env, def_id, substs).unwrap(); + let cid = GlobalId { + instance, + promoted: None, + }; + tcx.const_eval(param_env.and(cid)) + }, + _ => Ok(constant), + } + } + + pub fn eval_mir_constant( + &mut self, + bx: &Bx, + constant: &mir::Constant<'tcx>, + ) -> Result<&'tcx ty::Const<'tcx>, ErrorHandled> { + let c = self.monomorphize(&constant.literal); + self.fully_evaluate(bx, c) + } + + /// process constant containing SIMD shuffle indices + pub fn simd_shuffle_indices( + &mut self, + bx: &Bx, + span: Span, + ty: Ty<'tcx>, + constant: Result<&'tcx ty::Const<'tcx>, ErrorHandled>, + ) -> (Bx::Value, Ty<'tcx>) { + constant + .and_then(|c| { + let field_ty = c.ty.builtin_index().unwrap(); + let fields = match c.ty.sty { + ty::Array(_, n) => n.unwrap_usize(bx.tcx()), + ref other => bug!("invalid simd shuffle type: {}", other), + }; + let values: Result, ErrorHandled> = (0..fields).map(|field| { + let field = const_field( + bx.tcx(), + ty::ParamEnv::reveal_all(), + self.instance, + None, + mir::Field::new(field as usize), + c, + )?; + if let Some(prim) = field.val.try_to_scalar() { + let layout = bx.layout_of(field_ty); + let scalar = match layout.abi { + layout::Abi::Scalar(ref x) => x, + _ => bug!("from_const: invalid ByVal layout: {:#?}", layout) + }; + Ok(bx.scalar_to_backend( + prim, scalar, + bx.immediate_backend_type(layout), + )) + } else { + bug!("simd shuffle field {:?}", field) + } + }).collect(); + let llval = bx.const_struct(&values?, false); + Ok((llval, c.ty)) + }) + .unwrap_or_else(|_| { + bx.tcx().sess.span_err( + span, + "could not evaluate shuffle_indices at compile time", + ); + // We've errored, so we don't have to produce working code. + let ty = self.monomorphize(&ty); + let llty = bx.backend_type(bx.layout_of(ty)); + (bx.const_undef(llty), ty) + }) + } +} diff --git a/src/librustc_codegen_llvm/mir/mod.rs b/src/librustc_codegen_ssa/mir/mod.rs similarity index 78% rename from src/librustc_codegen_llvm/mir/mod.rs rename to src/librustc_codegen_ssa/mir/mod.rs index a6e2ccf92e..a992364959 100644 --- a/src/librustc_codegen_llvm/mir/mod.rs +++ b/src/librustc_codegen_ssa/mir/mod.rs @@ -8,23 +8,17 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
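// Illustrative sketch (stand-alone): `simd_shuffle_indices` above evaluates each
// shuffle index to a constant and collects the results, so the first evaluation
// error short-circuits the whole vector -- the `Result: FromIterator` idiom.
fn eval_index(i: u64) -> Result<u64, String> {
    if i < 4 {
        Ok(i * 10)
    } else {
        Err(format!("index {} cannot be evaluated", i))
    }
}

fn main() {
    let ok: Result<Vec<u64>, String> = (0..4).map(eval_index).collect();
    assert_eq!(ok, Ok(vec![0, 10, 20, 30]));

    let err: Result<Vec<u64>, String> = (0..6).map(eval_index).collect();
    assert!(err.is_err());
}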
-use common::{C_i32, C_null}; use libc::c_uint; -use llvm::{self, BasicBlock}; -use llvm::debuginfo::DIScope; use rustc::ty::{self, Ty, TypeFoldable, UpvarSubsts}; -use rustc::ty::layout::{LayoutOf, TyLayout}; +use rustc::ty::layout::{TyLayout, HasTyCtxt}; use rustc::mir::{self, Mir}; use rustc::ty::subst::Substs; use rustc::session::config::DebugInfo; use base; -use builder::Builder; -use common::{CodegenCx, Funclet}; -use debuginfo::{self, declare_local, VariableAccess, VariableKind, FunctionDebugContext}; -use monomorphize::Instance; -use abi::{ArgTypeExt, FnType, FnTypeExt, PassMode}; -use type_::Type; -use value::Value; +use debuginfo::{self, VariableAccess, VariableKind, FunctionDebugContext}; +use rustc_mir::monomorphize::Instance; +use rustc_target::abi::call::{FnType, PassMode}; +use traits::*; use syntax_pos::{DUMMY_SP, NO_EXPANSION, BytePos, Span}; use syntax::symbol::keywords; @@ -34,8 +28,6 @@ use std::iter; use rustc_data_structures::bit_set::BitSet; use rustc_data_structures::indexed_vec::IndexVec; -pub use self::constant::codegen_static_initializer; - use self::analyze::CleanupKind; use self::place::PlaceRef; use rustc::mir::traversal; @@ -43,16 +35,16 @@ use rustc::mir::traversal; use self::operand::{OperandRef, OperandValue}; /// Master context for codegenning from MIR. -pub struct FunctionCx<'a, 'll: 'a, 'tcx: 'll> { +pub struct FunctionCx<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> { instance: Instance<'tcx>, mir: &'a mir::Mir<'tcx>, - debug_context: FunctionDebugContext<'ll>, + debug_context: FunctionDebugContext, - llfn: &'ll Value, + llfn: Bx::Value, - cx: &'a CodegenCx<'ll, 'tcx>, + cx: &'a Bx::CodegenCx, fn_ty: FnType<'tcx, Ty<'tcx>>, @@ -63,24 +55,24 @@ pub struct FunctionCx<'a, 'll: 'a, 'tcx: 'll> { /// don't really care about it very much. Anyway, this value /// contains an alloca into which the personality is stored and /// then later loaded when generating the DIVERGE_BLOCK. - personality_slot: Option>, + personality_slot: Option>, /// A `Block` for each MIR `BasicBlock` - blocks: IndexVec, + blocks: IndexVec, /// The funclet status of each basic block cleanup_kinds: IndexVec, /// When targeting MSVC, this stores the cleanup info for each funclet /// BB. This is initialized as we compute the funclets' head block in RPO. - funclets: &'a IndexVec>>, + funclets: IndexVec>, /// This stores the landing-pad block for a given BB, computed lazily on GNU /// and eagerly on MSVC. - landing_pads: IndexVec>, + landing_pads: IndexVec>, /// Cached unreachable block - unreachable_block: Option<&'ll BasicBlock>, + unreachable_block: Option, /// The location where each MIR arg/var/tmp/ret is stored. This is /// usually an `PlaceRef` representing an alloca, but not always: @@ -97,32 +89,36 @@ pub struct FunctionCx<'a, 'll: 'a, 'tcx: 'll> { /// /// Avoiding allocs can also be important for certain intrinsics, /// notably `expect`. - locals: IndexVec>, + locals: IndexVec>, /// Debug information for MIR scopes. - scopes: IndexVec>, + scopes: IndexVec>, /// If this function is being monomorphized, this contains the type substitutions used. 
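// Illustrative sketch (all names here are invented, not rustc's real traits): the
// refactoring above makes `FunctionCx` generic over a builder/backend trait whose
// associated types stand in for the old LLVM-specific `&'ll Value` and
// `&'ll BasicBlock` handles.
trait Backend {
    type Value: Copy + std::fmt::Debug;
    type BasicBlock: Copy;
}

struct ToyBackend;

impl Backend for ToyBackend {
    // A toy backend can use plain indices as its handles.
    type Value = u32;
    type BasicBlock = usize;
}

struct FuncCx<B: Backend> {
    llfn: B::Value,
    blocks: Vec<B::BasicBlock>,
}

fn main() {
    let fx = FuncCx::<ToyBackend> { llfn: 7, blocks: vec![0, 1, 2] };
    println!("llfn = {:?}, {} blocks", fx.llfn, fx.blocks.len());
}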
param_substs: &'tcx Substs<'tcx>, } -impl FunctionCx<'a, 'll, 'tcx> { +impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { pub fn monomorphize(&self, value: &T) -> T where T: TypeFoldable<'tcx> { - self.cx.tcx.subst_and_normalize_erasing_regions( + self.cx.tcx().subst_and_normalize_erasing_regions( self.param_substs, ty::ParamEnv::reveal_all(), value, ) } - pub fn set_debug_loc(&mut self, bx: &Builder<'_, 'll, '_>, source_info: mir::SourceInfo) { + pub fn set_debug_loc( + &mut self, + bx: &mut Bx, + source_info: mir::SourceInfo + ) { let (scope, span) = self.debug_loc(source_info); - debuginfo::set_source_location(&self.debug_context, bx, scope, span); + bx.set_source_location(&self.debug_context, scope, span); } - pub fn debug_loc(&mut self, source_info: mir::SourceInfo) -> (Option<&'ll DIScope>, Span) { + pub fn debug_loc(&self, source_info: mir::SourceInfo) -> (Option, Span) { // Bail out if debug info emission is not enabled. match self.debug_context { FunctionDebugContext::DebugInfoDisabled | @@ -162,34 +158,38 @@ impl FunctionCx<'a, 'll, 'tcx> { // corresponding to span's containing source scope. If so, we need to create a DIScope // "extension" into that file. fn scope_metadata_for_loc(&self, scope_id: mir::SourceScope, pos: BytePos) - -> Option<&'ll DIScope> { + -> Option { let scope_metadata = self.scopes[scope_id].scope_metadata; if pos < self.scopes[scope_id].file_start_pos || pos >= self.scopes[scope_id].file_end_pos { - let cm = self.cx.sess().source_map(); + let sm = self.cx.sess().source_map(); let defining_crate = self.debug_context.get_ref(DUMMY_SP).defining_crate; - Some(debuginfo::extend_scope_to_file(self.cx, - scope_metadata.unwrap(), - &cm.lookup_char_pos(pos).file, - defining_crate)) + Some(self.cx.extend_scope_to_file( + scope_metadata.unwrap(), + &sm.lookup_char_pos(pos).file, + defining_crate + )) } else { scope_metadata } } } -enum LocalRef<'ll, 'tcx> { - Place(PlaceRef<'ll, 'tcx>), +enum LocalRef<'tcx, V> { + Place(PlaceRef<'tcx, V>), /// `UnsizedPlace(p)`: `p` itself is a thin pointer (indirect place). /// `*p` is the fat pointer that references the actual unsized place. /// Every time it is initialized, we have to reallocate the place /// and update the fat pointer. That's the reason why it is indirect. 
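// Illustrative sketch (stand-alone): an unsized place is reached through a fat
// pointer (data pointer plus extra), so `UnsizedPlace(p)` above keeps a thin
// pointer to that fat pointer and reallocates whenever the place is
// re-initialized.
use std::mem::size_of;

fn main() {
    assert_eq!(size_of::<&u8>(), size_of::<usize>());       // thin pointer
    assert_eq!(size_of::<&[u8]>(), 2 * size_of::<usize>()); // fat pointer
    let storage = [1u8, 2, 3];
    let fat: &[u8] = &storage; // (data ptr, len = 3)
    let thin: &&[u8] = &fat;   // thin pointer to the fat pointer
    assert_eq!(thin.len(), 3);
}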
- UnsizedPlace(PlaceRef<'ll, 'tcx>), - Operand(Option>), + UnsizedPlace(PlaceRef<'tcx, V>), + Operand(Option>), } -impl LocalRef<'ll, 'tcx> { - fn new_operand(cx: &CodegenCx<'ll, 'tcx>, layout: TyLayout<'tcx>) -> LocalRef<'ll, 'tcx> { +impl<'tcx, V: CodegenObject> LocalRef<'tcx, V> { + fn new_operand>( + cx: &Cx, + layout: TyLayout<'tcx>, + ) -> LocalRef<'tcx, V> { if layout.is_zst() { // Zero-size temporaries aren't always initialized, which // doesn't matter because they don't contain data, but @@ -203,18 +203,18 @@ impl LocalRef<'ll, 'tcx> { /////////////////////////////////////////////////////////////////////////// -pub fn codegen_mir( - cx: &'a CodegenCx<'ll, 'tcx>, - llfn: &'ll Value, +pub fn codegen_mir<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( + cx: &'a Bx::CodegenCx, + llfn: Bx::Value, mir: &'a Mir<'tcx>, instance: Instance<'tcx>, sig: ty::FnSig<'tcx>, ) { - let fn_ty = FnType::new(cx, sig, &[]); + let fn_ty = cx.new_fn_type(sig, &[]); debug!("fn_ty: {:?}", fn_ty); let debug_context = - debuginfo::create_function_debug_context(cx, instance, sig, llfn, mir); - let bx = Builder::new_block(cx, llfn, "start"); + cx.create_function_debug_context(instance, sig, llfn, mir); + let mut bx = Bx::new_block(cx, llfn, "start"); if mir.basic_blocks().iter().any(|bb| bb.is_cleanup) { bx.set_personality_fn(cx.eh_personality()); @@ -224,7 +224,7 @@ pub fn codegen_mir( // Allocate a `Block` for every basic block, except // the start block, if nothing loops back to it. let reentrant_start_block = !mir.predecessors_for(mir::START_BLOCK).is_empty(); - let block_bxs: IndexVec = + let block_bxs: IndexVec = mir.basic_blocks().indices().map(|bb| { if bb == mir::START_BLOCK && !reentrant_start_block { bx.llbb() @@ -234,8 +234,8 @@ pub fn codegen_mir( }).collect(); // Compute debuginfo scopes from MIR scopes. 
- let scopes = debuginfo::create_mir_scopes(cx, mir, &debug_context); - let (landing_pads, funclets) = create_funclets(mir, &bx, &cleanup_kinds, &block_bxs); + let scopes = cx.create_mir_scopes(mir, &debug_context); + let (landing_pads, funclets) = create_funclets(mir, &mut bx, &cleanup_kinds, &block_bxs); let mut fx = FunctionCx { instance, @@ -248,7 +248,7 @@ pub fn codegen_mir( unreachable_block: None, cleanup_kinds, landing_pads, - funclets: &funclets, + funclets, scopes, locals: IndexVec::new(), debug_context, @@ -262,37 +262,38 @@ pub fn codegen_mir( // Allocate variable and temp allocas fx.locals = { - let args = arg_local_refs(&bx, &fx, &fx.scopes, &memory_locals); + let args = arg_local_refs(&mut bx, &fx, &fx.scopes, &memory_locals); let mut allocate_local = |local| { let decl = &mir.local_decls[local]; - let layout = bx.cx.layout_of(fx.monomorphize(&decl.ty)); + let layout = bx.layout_of(fx.monomorphize(&decl.ty)); assert!(!layout.ty.has_erasable_regions()); if let Some(name) = decl.name { // User variable let debug_scope = fx.scopes[decl.visibility_scope]; - let dbg = debug_scope.is_valid() && bx.sess().opts.debuginfo == DebugInfo::Full; + let dbg = debug_scope.is_valid() && + bx.sess().opts.debuginfo == DebugInfo::Full; if !memory_locals.contains(local) && !dbg { debug!("alloc: {:?} ({}) -> operand", local, name); - return LocalRef::new_operand(bx.cx, layout); + return LocalRef::new_operand(bx.cx(), layout); } debug!("alloc: {:?} ({}) -> place", local, name); if layout.is_unsized() { let indirect_place = - PlaceRef::alloca_unsized_indirect(&bx, layout, &name.as_str()); + PlaceRef::alloca_unsized_indirect(&mut bx, layout, &name.as_str()); // FIXME: add an appropriate debuginfo LocalRef::UnsizedPlace(indirect_place) } else { - let place = PlaceRef::alloca(&bx, layout, &name.as_str()); + let place = PlaceRef::alloca(&mut bx, layout, &name.as_str()); if dbg { let (scope, span) = fx.debug_loc(mir::SourceInfo { span: decl.source_info.span, scope: decl.visibility_scope, }); - declare_local(&bx, &fx.debug_context, name, layout.ty, scope.unwrap(), + bx.declare_local(&fx.debug_context, name, layout.ty, scope.unwrap(), VariableAccess::DirectVariable { alloca: place.llval }, VariableKind::LocalVariable, span); } @@ -302,23 +303,26 @@ pub fn codegen_mir( // Temporary or return place if local == mir::RETURN_PLACE && fx.fn_ty.ret.is_indirect() { debug!("alloc: {:?} (return place) -> place", local); - let llretptr = llvm::get_param(llfn, 0); - LocalRef::Place(PlaceRef::new_sized(llretptr, layout, layout.align)) + let llretptr = fx.cx.get_param(llfn, 0); + LocalRef::Place(PlaceRef::new_sized(llretptr, layout, layout.align.abi)) } else if memory_locals.contains(local) { debug!("alloc: {:?} -> place", local); if layout.is_unsized() { - let indirect_place = - PlaceRef::alloca_unsized_indirect(&bx, layout, &format!("{:?}", local)); + let indirect_place = PlaceRef::alloca_unsized_indirect( + &mut bx, + layout, + &format!("{:?}", local), + ); LocalRef::UnsizedPlace(indirect_place) } else { - LocalRef::Place(PlaceRef::alloca(&bx, layout, &format!("{:?}", local))) + LocalRef::Place(PlaceRef::alloca(&mut bx, layout, &format!("{:?}", local))) } } else { // If this is an immediate local, we do not create an // alloca in advance. Instead we wait until we see the // definition and update the operand there. 
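// Illustrative sketch (stand-alone): an indirect return value ("sret") means the
// caller passes a pointer to the return slot as a hidden leading argument, which
// is what `get_param(llfn, 0)` fetches for RETURN_PLACE above.
use std::mem::MaybeUninit;

struct Big([u64; 16]);

// The hidden out-pointer, made explicit: the callee writes the result through it.
fn produce_into(ret: &mut MaybeUninit<Big>) {
    ret.write(Big([7; 16]));
}

fn main() {
    let mut slot = MaybeUninit::<Big>::uninit();
    produce_into(&mut slot);
    let value = unsafe { slot.assume_init() };
    assert_eq!(value.0[0], 7);
}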
debug!("alloc: {:?} -> operand", local); - LocalRef::new_operand(bx.cx, layout) + LocalRef::new_operand(bx.cx(), layout) } } }; @@ -356,19 +360,19 @@ pub fn codegen_mir( if !visited.contains(bb.index()) { debug!("codegen_mir: block {:?} was not visited", bb); unsafe { - llvm::LLVMDeleteBasicBlock(fx.blocks[bb]); + bx.delete_basic_block(fx.blocks[bb]); } } } } -fn create_funclets( +fn create_funclets<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( mir: &'a Mir<'tcx>, - bx: &Builder<'a, 'll, 'tcx>, + bx: &mut Bx, cleanup_kinds: &IndexVec, - block_bxs: &IndexVec) - -> (IndexVec>, - IndexVec>>) + block_bxs: &IndexVec) + -> (IndexVec>, + IndexVec>) { block_bxs.iter_enumerated().zip(cleanup_kinds).map(|((bb, &llbb), cleanup_kind)| { match *cleanup_kind { @@ -376,7 +380,7 @@ fn create_funclets( _ => return (None, None) } - let cleanup; + let funclet; let ret_llbb; match mir[bb].terminator.as_ref().map(|t| &t.kind) { // This is a basic block that we're aborting the program for, @@ -400,8 +404,8 @@ fn create_funclets( // bar(); // } Some(&mir::TerminatorKind::Abort) => { - let cs_bx = bx.build_sibling_block(&format!("cs_funclet{:?}", bb)); - let cp_bx = bx.build_sibling_block(&format!("cp_funclet{:?}", bb)); + let mut cs_bx = bx.build_sibling_block(&format!("cs_funclet{:?}", bb)); + let mut cp_bx = bx.build_sibling_block(&format!("cp_funclet{:?}", bb)); ret_llbb = cs_bx.llbb(); let cs = cs_bx.catch_switch(None, None, 1); @@ -411,34 +415,37 @@ fn create_funclets( // C++ personality function, but `catch (...)` has no type so // it's null. The 64 here is actually a bitfield which // represents that this is a catch-all block. - let null = C_null(Type::i8p(bx.cx)); - let sixty_four = C_i32(bx.cx, 64); - cleanup = cp_bx.catch_pad(cs, &[null, sixty_four, null]); + let null = bx.const_null(bx.type_i8p()); + let sixty_four = bx.const_i32(64); + funclet = cp_bx.catch_pad(cs, &[null, sixty_four, null]); cp_bx.br(llbb); } _ => { - let cleanup_bx = bx.build_sibling_block(&format!("funclet_{:?}", bb)); + let mut cleanup_bx = bx.build_sibling_block(&format!("funclet_{:?}", bb)); ret_llbb = cleanup_bx.llbb(); - cleanup = cleanup_bx.cleanup_pad(None, &[]); + funclet = cleanup_bx.cleanup_pad(None, &[]); cleanup_bx.br(llbb); } }; - (Some(ret_llbb), Some(Funclet::new(cleanup))) + (Some(ret_llbb), Some(funclet)) }).unzip() } /// Produce, for each argument, a `Value` pointing at the /// argument's value. As arguments are places, these are always /// indirect. 
-fn arg_local_refs( - bx: &Builder<'a, 'll, 'tcx>, - fx: &FunctionCx<'a, 'll, 'tcx>, - scopes: &IndexVec>, +fn arg_local_refs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( + bx: &mut Bx, + fx: &FunctionCx<'a, 'tcx, Bx>, + scopes: &IndexVec< + mir::SourceScope, + debuginfo::MirDebugScope + >, memory_locals: &BitSet, -) -> Vec> { +) -> Vec> { let mir = fx.mir; - let tcx = bx.tcx(); + let tcx = fx.cx.tcx(); let mut idx = 0; let mut llarg_idx = fx.fn_ty.ret.is_indirect() as usize; @@ -471,14 +478,15 @@ fn arg_local_refs( _ => bug!("spread argument isn't a tuple?!") }; - let place = PlaceRef::alloca(bx, bx.cx.layout_of(arg_ty), &name); + let place = PlaceRef::alloca(bx, bx.layout_of(arg_ty), &name); for i in 0..tupled_arg_tys.len() { let arg = &fx.fn_ty.args[idx]; idx += 1; if arg.pad.is_some() { llarg_idx += 1; } - arg.store_fn_arg(bx, &mut llarg_idx, place.project_field(bx, i)); + let pr_field = place.project_field(bx, i); + bx.store_fn_arg(arg, &mut llarg_idx, pr_field); } // Now that we have one alloca that contains the aggregate value, @@ -487,8 +495,7 @@ fn arg_local_refs( let variable_access = VariableAccess::DirectVariable { alloca: place.llval }; - declare_local( - bx, + bx.declare_local( &fx.debug_context, arg_decl.name.unwrap_or(keywords::Invalid.name()), arg_ty, scope, @@ -514,21 +521,21 @@ fn arg_local_refs( let local = |op| LocalRef::Operand(Some(op)); match arg.mode { PassMode::Ignore => { - return local(OperandRef::new_zst(bx.cx, arg.layout)); + return local(OperandRef::new_zst(bx.cx(), arg.layout)); } PassMode::Direct(_) => { - let llarg = llvm::get_param(bx.llfn(), llarg_idx as c_uint); + let llarg = bx.get_param(bx.llfn(), llarg_idx as c_uint); bx.set_value_name(llarg, &name); llarg_idx += 1; return local( OperandRef::from_immediate_or_packed_pair(bx, llarg, arg.layout)); } PassMode::Pair(..) => { - let a = llvm::get_param(bx.llfn(), llarg_idx as c_uint); + let a = bx.get_param(bx.llfn(), llarg_idx as c_uint); bx.set_value_name(a, &(name.clone() + ".0")); llarg_idx += 1; - let b = llvm::get_param(bx.llfn(), llarg_idx as c_uint); + let b = bx.get_param(bx.llfn(), llarg_idx as c_uint); bx.set_value_name(b, &(name + ".1")); llarg_idx += 1; @@ -545,25 +552,25 @@ fn arg_local_refs( // Don't copy an indirect argument to an alloca, the caller // already put it in a temporary alloca and gave it up. // FIXME: lifetimes - let llarg = llvm::get_param(bx.llfn(), llarg_idx as c_uint); + let llarg = bx.get_param(bx.llfn(), llarg_idx as c_uint); bx.set_value_name(llarg, &name); llarg_idx += 1; - PlaceRef::new_sized(llarg, arg.layout, arg.layout.align) + PlaceRef::new_sized(llarg, arg.layout, arg.layout.align.abi) } else if arg.is_unsized_indirect() { // As the storage for the indirect argument lives during // the whole function call, we just copy the fat pointer. 
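// Illustrative sketch (stand-alone): a PassMode::Pair argument such as `&[T]`
// arrives as two scalar parameters (the "name.0"/"name.1" values above); splitting
// a slice reference into its raw parts mirrors that calling convention.
fn main() {
    let data = [10u32, 20, 30];
    let arg: &[u32] = &data;
    let (ptr, len) = (arg.as_ptr(), arg.len()); // the two halves of the pair
    let rebuilt: &[u32] = unsafe { std::slice::from_raw_parts(ptr, len) };
    assert_eq!(rebuilt, arg);
}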
- let llarg = llvm::get_param(bx.llfn(), llarg_idx as c_uint); + let llarg = bx.get_param(bx.llfn(), llarg_idx as c_uint); llarg_idx += 1; - let llextra = llvm::get_param(bx.llfn(), llarg_idx as c_uint); + let llextra = bx.get_param(bx.llfn(), llarg_idx as c_uint); llarg_idx += 1; let indirect_operand = OperandValue::Pair(llarg, llextra); let tmp = PlaceRef::alloca_unsized_indirect(bx, arg.layout, &name); - indirect_operand.store(&bx, tmp); + indirect_operand.store(bx, tmp); tmp } else { let tmp = PlaceRef::alloca(bx, arg.layout, &name); - arg.store_fn_arg(bx, &mut llarg_idx, tmp); + bx.store_fn_arg(arg, &mut llarg_idx, tmp); tmp }; arg_scope.map(|scope| { @@ -577,8 +584,7 @@ fn arg_local_refs( alloca: place.llval }; - declare_local( - bx, + bx.declare_local( &fx.debug_context, arg_decl.name.unwrap_or(keywords::Invalid.name()), arg.layout.ty, @@ -593,7 +599,7 @@ fn arg_local_refs( // Or is it the closure environment? let (closure_layout, env_ref) = match arg.layout.ty.sty { ty::RawPtr(ty::TypeAndMut { ty, .. }) | - ty::Ref(_, ty, _) => (bx.cx.layout_of(ty), true), + ty::Ref(_, ty, _) => (bx.layout_of(ty), true), _ => (arg.layout, false) }; @@ -612,10 +618,10 @@ fn arg_local_refs( // doesn't actually strip the offset when splitting the closure // environment into its components so it ends up out of bounds. // (cuviper) It seems to be fine without the alloca on LLVM 6 and later. - let env_alloca = !env_ref && unsafe { llvm::LLVMRustVersionMajor() < 6 }; + let env_alloca = !env_ref && bx.closure_env_needs_indirect_debuginfo(); let env_ptr = if env_alloca { let scratch = PlaceRef::alloca(bx, - bx.cx.layout_of(tcx.mk_mut_ptr(arg.layout.ty)), + bx.layout_of(tcx.mk_mut_ptr(arg.layout.ty)), "__debuginfo_env_ptr"); bx.store(place.llval, scratch.llval, scratch.align); scratch.llval @@ -626,12 +632,7 @@ fn arg_local_refs( for (i, (decl, ty)) in mir.upvar_decls.iter().zip(upvar_tys).enumerate() { let byte_offset_of_var_in_env = closure_layout.fields.offset(i).bytes(); - let ops = unsafe { - [llvm::LLVMRustDIBuilderCreateOpDeref(), - llvm::LLVMRustDIBuilderCreateOpPlusUconst(), - byte_offset_of_var_in_env as i64, - llvm::LLVMRustDIBuilderCreateOpDeref()] - }; + let ops = bx.debuginfo_upvar_decls_ops_sequence(byte_offset_of_var_in_env); // The environment and the capture can each be indirect. @@ -650,8 +651,7 @@ fn arg_local_refs( alloca: env_ptr, address_operations: &ops }; - declare_local( - bx, + bx.declare_local( &fx.debug_context, decl.debug_name, ty, @@ -672,7 +672,7 @@ fn arg_local_refs( mod analyze; mod block; -mod constant; +pub mod constant; pub mod place; pub mod operand; mod rvalue; diff --git a/src/librustc_codegen_llvm/mir/operand.rs b/src/librustc_codegen_ssa/mir/operand.rs similarity index 64% rename from src/librustc_codegen_llvm/mir/operand.rs rename to src/librustc_codegen_ssa/mir/operand.rs index 6b61abd15a..a85e75936d 100644 --- a/src/librustc_codegen_llvm/mir/operand.rs +++ b/src/librustc_codegen_ssa/mir/operand.rs @@ -8,40 +8,36 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use rustc::mir::interpret::{ConstValue, ConstEvalErr}; +use rustc::mir::interpret::{ConstValue, ErrorHandled}; use rustc::mir; use rustc::ty; use rustc::ty::layout::{self, Align, LayoutOf, TyLayout}; -use rustc_data_structures::sync::Lrc; use base; -use common::{CodegenCx, C_undef, C_usize}; -use builder::{Builder, MemFlags}; -use value::Value; -use type_of::LayoutLlvmExt; -use type_::Type; +use MemFlags; use glue; +use traits::*; + use std::fmt; use super::{FunctionCx, LocalRef}; -use super::constant::scalar_to_llvm; use super::place::PlaceRef; /// The representation of a Rust value. The enum variant is in fact /// uniquely determined by the value's type, but is kept as a /// safety check. #[derive(Copy, Clone, Debug)] -pub enum OperandValue<'ll> { +pub enum OperandValue { /// A reference to the actual operand. The data is guaranteed /// to be valid for the operand's lifetime. /// The second value, if any, is the extra data (vtable or length) /// which indicates that it refers to an unsized rvalue. - Ref(&'ll Value, Option<&'ll Value>, Align), + Ref(V, Option, Align), /// A single LLVM value. - Immediate(&'ll Value), + Immediate(V), /// A pair of immediate LLVM values. Used by fat pointers too. - Pair(&'ll Value, &'ll Value) + Pair(V, V) } /// An `OperandRef` is an "SSA" reference to a Rust value, along with @@ -53,37 +49,40 @@ pub enum OperandValue<'ll> { /// directly is sure to cause problems -- use `OperandRef::store` /// instead. #[derive(Copy, Clone)] -pub struct OperandRef<'ll, 'tcx> { +pub struct OperandRef<'tcx, V> { // The value. - pub val: OperandValue<'ll>, + pub val: OperandValue, // The layout of value, based on its Rust type. pub layout: TyLayout<'tcx>, } -impl fmt::Debug for OperandRef<'ll, 'tcx> { +impl fmt::Debug for OperandRef<'tcx, V> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "OperandRef({:?} @ {:?})", self.val, self.layout) } } -impl OperandRef<'ll, 'tcx> { - pub fn new_zst(cx: &CodegenCx<'ll, 'tcx>, - layout: TyLayout<'tcx>) -> OperandRef<'ll, 'tcx> { +impl<'a, 'tcx: 'a, V: CodegenObject> OperandRef<'tcx, V> { + pub fn new_zst>( + cx: &Cx, + layout: TyLayout<'tcx> + ) -> OperandRef<'tcx, V> { assert!(layout.is_zst()); OperandRef { - val: OperandValue::Immediate(C_undef(layout.immediate_llvm_type(cx))), + val: OperandValue::Immediate(cx.const_undef(cx.immediate_backend_type(layout))), layout } } - pub fn from_const(bx: &Builder<'a, 'll, 'tcx>, - val: &'tcx ty::Const<'tcx>) - -> Result, Lrc>> { - let layout = bx.cx.layout_of(val.ty); + pub fn from_const>( + bx: &mut Bx, + val: &'tcx ty::Const<'tcx> + ) -> Result { + let layout = bx.cx().layout_of(val.ty); if layout.is_zst() { - return Ok(OperandRef::new_zst(bx.cx, layout)); + return Ok(OperandRef::new_zst(bx.cx(), layout)); } let val = match val.val { @@ -93,11 +92,10 @@ impl OperandRef<'ll, 'tcx> { layout::Abi::Scalar(ref x) => x, _ => bug!("from_const: invalid ByVal layout: {:#?}", layout) }; - let llval = scalar_to_llvm( - bx.cx, + let llval = bx.cx().scalar_to_backend( x, scalar, - layout.immediate_llvm_type(bx.cx), + bx.cx().immediate_backend_type(layout), ); OperandValue::Immediate(llval) }, @@ -106,23 +104,20 @@ impl OperandRef<'ll, 'tcx> { layout::Abi::ScalarPair(ref a, ref b) => (a, b), _ => bug!("from_const: invalid ScalarPair layout: {:#?}", layout) }; - let a_llval = scalar_to_llvm( - bx.cx, + let a_llval = bx.cx().scalar_to_backend( a, a_scalar, - layout.scalar_pair_element_llvm_type(bx.cx, 0, true), + bx.cx().scalar_pair_element_backend_type(layout, 0, true), ); - let b_layout = 
layout.scalar_pair_element_llvm_type(bx.cx, 1, true); - let b_llval = scalar_to_llvm( - bx.cx, + let b_llval = bx.cx().scalar_to_backend( b, b_scalar, - b_layout, + bx.cx().scalar_pair_element_backend_type(layout, 1, true), ); OperandValue::Pair(a_llval, b_llval) }, ConstValue::ByRef(_, alloc, offset) => { - return Ok(PlaceRef::from_const_alloc(bx, layout, alloc, offset).load(bx)); + return Ok(bx.load_operand(bx.cx().from_const_alloc(layout, alloc, offset))); }, }; @@ -134,14 +129,17 @@ impl OperandRef<'ll, 'tcx> { /// Asserts that this operand refers to a scalar and returns /// a reference to its value. - pub fn immediate(self) -> &'ll Value { + pub fn immediate(self) -> V { match self.val { OperandValue::Immediate(s) => s, _ => bug!("not immediate: {:?}", self) } } - pub fn deref(self, cx: &CodegenCx<'ll, 'tcx>) -> PlaceRef<'ll, 'tcx> { + pub fn deref>( + self, + cx: &Cx + ) -> PlaceRef<'tcx, V> { let projected_ty = self.layout.ty.builtin_deref(true) .unwrap_or_else(|| bug!("deref of non-pointer {:?}", self)).ty; let (llptr, llextra) = match self.val { @@ -154,21 +152,26 @@ impl OperandRef<'ll, 'tcx> { llval: llptr, llextra, layout, - align: layout.align, + align: layout.align.abi, } } /// If this operand is a `Pair`, we return an aggregate with the two values. /// For other cases, see `immediate`. - pub fn immediate_or_packed_pair(self, bx: &Builder<'a, 'll, 'tcx>) -> &'ll Value { + pub fn immediate_or_packed_pair>( + self, + bx: &mut Bx + ) -> V { if let OperandValue::Pair(a, b) = self.val { - let llty = self.layout.llvm_type(bx.cx); + let llty = bx.cx().backend_type(self.layout); debug!("Operand::immediate_or_packed_pair: packing {:?} into {:?}", self, llty); // Reconstruct the immediate aggregate. - let mut llpair = C_undef(llty); - llpair = bx.insert_value(llpair, base::from_immediate(bx, a), 0); - llpair = bx.insert_value(llpair, base::from_immediate(bx, b), 1); + let mut llpair = bx.cx().const_undef(llty); + let imm_a = base::from_immediate(bx, a); + let imm_b = base::from_immediate(bx, b); + llpair = bx.insert_value(llpair, imm_a, 0); + llpair = bx.insert_value(llpair, imm_b, 1); llpair } else { self.immediate() @@ -176,17 +179,20 @@ impl OperandRef<'ll, 'tcx> { } /// If the type is a pair, we return a `Pair`, otherwise, an `Immediate`. - pub fn from_immediate_or_packed_pair(bx: &Builder<'a, 'll, 'tcx>, - llval: &'ll Value, - layout: TyLayout<'tcx>) - -> OperandRef<'ll, 'tcx> { + pub fn from_immediate_or_packed_pair>( + bx: &mut Bx, + llval: V, + layout: TyLayout<'tcx> + ) -> Self { let val = if let layout::Abi::ScalarPair(ref a, ref b) = layout.abi { debug!("Operand::from_immediate_or_packed_pair: unpacking {:?} @ {:?}", llval, layout); // Deconstruct the immediate aggregate. 
- let a_llval = base::to_immediate_scalar(bx, bx.extract_value(llval, 0), a); - let b_llval = base::to_immediate_scalar(bx, bx.extract_value(llval, 1), b); + let a_llval = bx.extract_value(llval, 0); + let a_llval = base::to_immediate_scalar(bx, a_llval, a); + let b_llval = bx.extract_value(llval, 1); + let b_llval = base::to_immediate_scalar(bx, b_llval, b); OperandValue::Pair(a_llval, b_llval) } else { OperandValue::Immediate(llval) @@ -194,14 +200,18 @@ impl OperandRef<'ll, 'tcx> { OperandRef { val, layout } } - pub fn extract_field(&self, bx: &Builder<'a, 'll, 'tcx>, i: usize) -> OperandRef<'ll, 'tcx> { - let field = self.layout.field(bx.cx, i); + pub fn extract_field>( + &self, + bx: &mut Bx, + i: usize + ) -> Self { + let field = self.layout.field(bx.cx(), i); let offset = self.layout.fields.offset(i); let mut val = match (self.val, &self.layout.abi) { // If the field is ZST, it has no data. _ if field.is_zst() => { - return OperandRef::new_zst(bx.cx, field); + return OperandRef::new_zst(bx.cx(), field); } // Newtype of a scalar, scalar pair or vector. @@ -214,12 +224,12 @@ impl OperandRef<'ll, 'tcx> { // Extract a scalar component from a pair. (OperandValue::Pair(a_llval, b_llval), &layout::Abi::ScalarPair(ref a, ref b)) => { if offset.bytes() == 0 { - assert_eq!(field.size, a.value.size(bx.cx)); + assert_eq!(field.size, a.value.size(bx.cx())); OperandValue::Immediate(a_llval) } else { - assert_eq!(offset, a.value.size(bx.cx) - .abi_align(b.value.align(bx.cx))); - assert_eq!(field.size, b.value.size(bx.cx)); + assert_eq!(offset, a.value.size(bx.cx()) + .align_to(b.value.align(bx.cx()).abi)); + assert_eq!(field.size, b.value.size(bx.cx())); OperandValue::Immediate(b_llval) } } @@ -227,20 +237,31 @@ impl OperandRef<'ll, 'tcx> { // `#[repr(simd)]` types are also immediate. (OperandValue::Immediate(llval), &layout::Abi::Vector { .. }) => { OperandValue::Immediate( - bx.extract_element(llval, C_usize(bx.cx, i as u64))) + bx.extract_element(llval, bx.cx().const_usize(i as u64))) } _ => bug!("OperandRef::extract_field({:?}): not applicable", self) }; // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types. + // Bools in union fields needs to be truncated. + let to_immediate_or_cast = |bx: &mut Bx, val, ty| { + if ty == bx.cx().type_i1() { + bx.trunc(val, ty) + } else { + bx.bitcast(val, ty) + } + }; + match val { OperandValue::Immediate(ref mut llval) => { - *llval = bx.bitcast(*llval, field.immediate_llvm_type(bx.cx)); + *llval = to_immediate_or_cast(bx, *llval, bx.cx().immediate_backend_type(field)); } OperandValue::Pair(ref mut a, ref mut b) => { - *a = bx.bitcast(*a, field.scalar_pair_element_llvm_type(bx.cx, 0, true)); - *b = bx.bitcast(*b, field.scalar_pair_element_llvm_type(bx.cx, 1, true)); + *a = to_immediate_or_cast(bx, *a, bx.cx() + .scalar_pair_element_backend_type(field, 0, true)); + *b = to_immediate_or_cast(bx, *b, bx.cx() + .scalar_pair_element_backend_type(field, 1, true)); } OperandValue::Ref(..) 
=> bug!() } @@ -252,27 +273,43 @@ impl OperandRef<'ll, 'tcx> { } } -impl OperandValue<'ll> { - pub fn store(self, bx: &Builder<'a, 'll, 'tcx>, dest: PlaceRef<'ll, 'tcx>) { +impl<'a, 'tcx: 'a, V: CodegenObject> OperandValue { + pub fn store>( + self, + bx: &mut Bx, + dest: PlaceRef<'tcx, V> + ) { self.store_with_flags(bx, dest, MemFlags::empty()); } - pub fn volatile_store(self, bx: &Builder<'a, 'll, 'tcx>, dest: PlaceRef<'ll, 'tcx>) { + pub fn volatile_store>( + self, + bx: &mut Bx, + dest: PlaceRef<'tcx, V> + ) { self.store_with_flags(bx, dest, MemFlags::VOLATILE); } - pub fn unaligned_volatile_store(self, bx: &Builder<'a, 'll, 'tcx>, dest: PlaceRef<'ll, 'tcx>) { + pub fn unaligned_volatile_store>( + self, + bx: &mut Bx, + dest: PlaceRef<'tcx, V>, + ) { self.store_with_flags(bx, dest, MemFlags::VOLATILE | MemFlags::UNALIGNED); } - pub fn nontemporal_store(self, bx: &Builder<'a, 'll, 'tcx>, dest: PlaceRef<'ll, 'tcx>) { + pub fn nontemporal_store>( + self, + bx: &mut Bx, + dest: PlaceRef<'tcx, V> + ) { self.store_with_flags(bx, dest, MemFlags::NONTEMPORAL); } - fn store_with_flags( + fn store_with_flags>( self, - bx: &Builder<'a, 'll, 'tcx>, - dest: PlaceRef<'ll, 'tcx>, + bx: &mut Bx, + dest: PlaceRef<'tcx, V>, flags: MemFlags, ) { debug!("OperandRef::store: operand={:?}, dest={:?}", self, dest); @@ -283,8 +320,8 @@ impl OperandValue<'ll> { } match self { OperandValue::Ref(r, None, source_align) => { - base::memcpy_ty(bx, dest.llval, r, dest.layout, - source_align.min(dest.align), flags) + base::memcpy_ty(bx, dest.llval, dest.align, r, source_align, + dest.layout, flags) } OperandValue::Ref(_, Some(_), _) => { bug!("cannot directly store unsized values"); @@ -298,7 +335,7 @@ impl OperandValue<'ll> { layout::Abi::ScalarPair(ref a, ref b) => (a, b), _ => bug!("store_with_flags: invalid ScalarPair layout: {:#?}", dest.layout) }; - let b_offset = a_scalar.value.size(bx.cx).abi_align(b_scalar.value.align(bx.cx)); + let b_offset = a_scalar.value.size(bx).align_to(b_scalar.value.align(bx).abi); let llptr = bx.struct_gep(dest.llval, 0); let val = base::from_immediate(bx, a); @@ -312,8 +349,11 @@ impl OperandValue<'ll> { } } } - - pub fn store_unsized(self, bx: &Builder<'a, 'll, 'tcx>, indirect_dest: PlaceRef<'ll, 'tcx>) { + pub fn store_unsized>( + self, + bx: &mut Bx, + indirect_dest: PlaceRef<'tcx, V> + ) { debug!("OperandRef::store_unsized: operand={:?}, indirect_dest={:?}", self, indirect_dest); let flags = MemFlags::empty(); @@ -329,26 +369,26 @@ impl OperandValue<'ll> { }; // FIXME: choose an appropriate alignment, or use dynamic align somehow - let max_align = Align::from_bits(128, 128).unwrap(); - let min_align = Align::from_bits(8, 8).unwrap(); + let max_align = Align::from_bits(128).unwrap(); + let min_align = Align::from_bits(8).unwrap(); // Allocate an appropriate region on the stack, and copy the value into it - let (llsize, _) = glue::size_and_align_of_dst(&bx, unsized_ty, Some(llextra)); - let lldst = bx.array_alloca(Type::i8(bx.cx), llsize, "unsized_tmp", max_align); - base::call_memcpy(&bx, lldst, llptr, llsize, min_align, flags); + let (llsize, _) = glue::size_and_align_of_dst(bx, unsized_ty, Some(llextra)); + let lldst = bx.array_alloca(bx.cx().type_i8(), llsize, "unsized_tmp", max_align); + bx.memcpy(lldst, max_align, llptr, min_align, llsize, flags); // Store the allocated region and the extra to the indirect place. 
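// Illustrative sketch (stand-alone): the second half of a ScalarPair starts at
// the first scalar's size rounded up to the second's alignment -- the
// `size.align_to(align)` computation used for `b_offset` in `store_with_flags`
// above.
fn align_to(size: u64, align: u64) -> u64 {
    (size + align - 1) & !(align - 1)
}

fn main() {
    // e.g. a pair of (u8, u32): the u32 half starts at offset 4, not 1.
    assert_eq!(align_to(1, 4), 4);
    // An already-aligned offset is unchanged.
    assert_eq!(align_to(8, 8), 8);
}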
let indirect_operand = OperandValue::Pair(lldst, llextra); - indirect_operand.store(&bx, indirect_dest); + indirect_operand.store(bx, indirect_dest); } } -impl FunctionCx<'a, 'll, 'tcx> { - fn maybe_codegen_consume_direct(&mut self, - bx: &Builder<'a, 'll, 'tcx>, - place: &mir::Place<'tcx>) - -> Option> - { +impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { + fn maybe_codegen_consume_direct( + &mut self, + bx: &mut Bx, + place: &mir::Place<'tcx> + ) -> Option> { debug!("maybe_codegen_consume_direct(place={:?})", place); // watch out for locals that do not have an @@ -379,9 +419,9 @@ impl FunctionCx<'a, 'll, 'tcx> { // ZSTs don't require any actual memory access. // FIXME(eddyb) deduplicate this with the identical // checks in `codegen_consume` and `extract_field`. - let elem = o.layout.field(bx.cx, 0); + let elem = o.layout.field(bx.cx(), 0); if elem.is_zst() { - return Some(OperandRef::new_zst(bx.cx, elem)); + return Some(OperandRef::new_zst(bx.cx(), elem)); } } _ => {} @@ -392,19 +432,19 @@ impl FunctionCx<'a, 'll, 'tcx> { None } - pub fn codegen_consume(&mut self, - bx: &Builder<'a, 'll, 'tcx>, - place: &mir::Place<'tcx>) - -> OperandRef<'ll, 'tcx> - { + pub fn codegen_consume( + &mut self, + bx: &mut Bx, + place: &mir::Place<'tcx> + ) -> OperandRef<'tcx, Bx::Value> { debug!("codegen_consume(place={:?})", place); let ty = self.monomorphized_place_ty(place); - let layout = bx.cx.layout_of(ty); + let layout = bx.cx().layout_of(ty); // ZSTs don't require any actual memory access. if layout.is_zst() { - return OperandRef::new_zst(bx.cx, layout); + return OperandRef::new_zst(bx.cx(), layout); } if let Some(o) = self.maybe_codegen_consume_direct(bx, place) { @@ -413,14 +453,15 @@ impl FunctionCx<'a, 'll, 'tcx> { // for most places, to consume them we just load them // out from their home - self.codegen_place(bx, place).load(bx) + let place = self.codegen_place(bx, place); + bx.load_operand(place) } - pub fn codegen_operand(&mut self, - bx: &Builder<'a, 'll, 'tcx>, - operand: &mir::Operand<'tcx>) - -> OperandRef<'ll, 'tcx> - { + pub fn codegen_operand( + &mut self, + bx: &mut Bx, + operand: &mir::Operand<'tcx> + ) -> OperandRef<'tcx, Bx::Value> { debug!("codegen_operand(operand={:?})", operand); match *operand { @@ -434,21 +475,23 @@ impl FunctionCx<'a, 'll, 'tcx> { self.eval_mir_constant(bx, constant) .and_then(|c| OperandRef::from_const(bx, c)) .unwrap_or_else(|err| { - err.report_as_error( - bx.tcx().at(constant.span), - "could not evaluate constant operand", - ); + match err { + // errored or at least linted + ErrorHandled::Reported => {}, + ErrorHandled::TooGeneric => { + bug!("codgen encountered polymorphic constant") + }, + } // Allow RalfJ to sleep soundly knowing that even refactorings that remove // the above error (or silence it under some conditions) will not cause UB - let fnname = bx.cx.get_intrinsic(&("llvm.trap")); - bx.call(fnname, &[], None); + bx.abort(); // We've errored, so we don't have to produce working code. 
- let layout = bx.cx.layout_of(ty); - PlaceRef::new_sized( - C_undef(layout.llvm_type(bx.cx).ptr_to()), + let layout = bx.cx().layout_of(ty); + bx.load_operand(PlaceRef::new_sized( + bx.cx().const_undef(bx.cx().type_ptr_to(bx.cx().backend_type(layout))), layout, - layout.align, - ).load(bx) + layout.align.abi, + )) }) } } diff --git a/src/librustc_codegen_llvm/mir/place.rs b/src/librustc_codegen_ssa/mir/place.rs similarity index 57% rename from src/librustc_codegen_llvm/mir/place.rs rename to src/librustc_codegen_ssa/mir/place.rs index 3a1aaa8595..1aba53255e 100644 --- a/src/librustc_codegen_llvm/mir/place.rs +++ b/src/librustc_codegen_ssa/mir/place.rs @@ -8,31 +8,26 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use llvm::{self, LLVMConstInBoundsGEP}; use rustc::ty::{self, Ty}; -use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, Size}; +use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, VariantIdx, HasTyCtxt}; use rustc::mir; use rustc::mir::tcx::PlaceTy; -use base; -use builder::Builder; -use common::{CodegenCx, C_undef, C_usize, C_u8, C_u32, C_uint, C_null, C_uint_big}; -use consts; -use type_of::LayoutLlvmExt; -use type_::Type; -use value::Value; +use MemFlags; +use common::IntPredicate; use glue; -use mir::constant::const_alloc_to_llvm; + +use traits::*; use super::{FunctionCx, LocalRef}; -use super::operand::{OperandRef, OperandValue}; +use super::operand::OperandValue; #[derive(Copy, Clone, Debug)] -pub struct PlaceRef<'ll, 'tcx> { +pub struct PlaceRef<'tcx, V> { /// Pointer to the contents of the place - pub llval: &'ll Value, + pub llval: V, /// This place's extra data if it is unsized, or null - pub llextra: Option<&'ll Value>, + pub llextra: Option, /// Monomorphized type of this place, including variant information pub layout: TyLayout<'tcx>, @@ -41,12 +36,12 @@ pub struct PlaceRef<'ll, 'tcx> { pub align: Align, } -impl PlaceRef<'ll, 'tcx> { +impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> { pub fn new_sized( - llval: &'ll Value, + llval: V, layout: TyLayout<'tcx>, align: Align, - ) -> PlaceRef<'ll, 'tcx> { + ) -> PlaceRef<'tcx, V> { assert!(!layout.is_unsized()); PlaceRef { llval, @@ -56,144 +51,73 @@ impl PlaceRef<'ll, 'tcx> { } } - pub fn from_const_alloc( - bx: &Builder<'a, 'll, 'tcx>, + pub fn alloca>( + bx: &mut Bx, layout: TyLayout<'tcx>, - alloc: &mir::interpret::Allocation, - offset: Size, - ) -> PlaceRef<'ll, 'tcx> { - let init = const_alloc_to_llvm(bx.cx, alloc); - let base_addr = consts::addr_of(bx.cx, init, layout.align, None); - - let llval = unsafe { LLVMConstInBoundsGEP( - consts::bitcast(base_addr, Type::i8p(bx.cx)), - &C_usize(bx.cx, offset.bytes()), - 1, - )}; - let llval = consts::bitcast(llval, layout.llvm_type(bx.cx).ptr_to()); - PlaceRef::new_sized(llval, layout, alloc.align) - } - - pub fn alloca(bx: &Builder<'a, 'll, 'tcx>, layout: TyLayout<'tcx>, name: &str) - -> PlaceRef<'ll, 'tcx> { + name: &str + ) -> Self { debug!("alloca({:?}: {:?})", name, layout); assert!(!layout.is_unsized(), "tried to statically allocate unsized place"); - let tmp = bx.alloca(layout.llvm_type(bx.cx), name, layout.align); - Self::new_sized(tmp, layout, layout.align) + let tmp = bx.alloca(bx.cx().backend_type(layout), name, layout.align.abi); + Self::new_sized(tmp, layout, layout.align.abi) } /// Returns a place for an indirect reference to an unsized place. 
- pub fn alloca_unsized_indirect(bx: &Builder<'a, 'll, 'tcx>, layout: TyLayout<'tcx>, name: &str) - -> PlaceRef<'ll, 'tcx> { + pub fn alloca_unsized_indirect>( + bx: &mut Bx, + layout: TyLayout<'tcx>, + name: &str, + ) -> Self { debug!("alloca_unsized_indirect({:?}: {:?})", name, layout); assert!(layout.is_unsized(), "tried to allocate indirect place for sized values"); - let ptr_ty = bx.cx.tcx.mk_mut_ptr(layout.ty); - let ptr_layout = bx.cx.layout_of(ptr_ty); + let ptr_ty = bx.cx().tcx().mk_mut_ptr(layout.ty); + let ptr_layout = bx.cx().layout_of(ptr_ty); Self::alloca(bx, ptr_layout, name) } - pub fn len(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Value { + pub fn len>( + &self, + cx: &Cx + ) -> V { if let layout::FieldPlacement::Array { count, .. } = self.layout.fields { if self.layout.is_unsized() { assert_eq!(count, 0); self.llextra.unwrap() } else { - C_usize(cx, count) + cx.const_usize(count) } } else { bug!("unexpected layout `{:#?}` in PlaceRef::len", self.layout) } } - pub fn load(&self, bx: &Builder<'a, 'll, 'tcx>) -> OperandRef<'ll, 'tcx> { - debug!("PlaceRef::load: {:?}", self); - - assert_eq!(self.llextra.is_some(), self.layout.is_unsized()); - - if self.layout.is_zst() { - return OperandRef::new_zst(bx.cx, self.layout); - } - - let scalar_load_metadata = |load, scalar: &layout::Scalar| { - let vr = scalar.valid_range.clone(); - match scalar.value { - layout::Int(..) => { - let range = scalar.valid_range_exclusive(bx.cx); - if range.start != range.end { - bx.range_metadata(load, range); - } - } - layout::Pointer if vr.start() < vr.end() && !vr.contains(&0) => { - bx.nonnull_metadata(load); - } - _ => {} - } - }; - - let val = if let Some(llextra) = self.llextra { - OperandValue::Ref(self.llval, Some(llextra), self.align) - } else if self.layout.is_llvm_immediate() { - let mut const_llval = None; - unsafe { - if let Some(global) = llvm::LLVMIsAGlobalVariable(self.llval) { - if llvm::LLVMIsGlobalConstant(global) == llvm::True { - const_llval = llvm::LLVMGetInitializer(global); - } - } - } - let llval = const_llval.unwrap_or_else(|| { - let load = bx.load(self.llval, self.align); - if let layout::Abi::Scalar(ref scalar) = self.layout.abi { - scalar_load_metadata(load, scalar); - } - load - }); - OperandValue::Immediate(base::to_immediate(bx, llval, self.layout)) - } else if let layout::Abi::ScalarPair(ref a, ref b) = self.layout.abi { - let b_offset = a.value.size(bx.cx).abi_align(b.value.align(bx.cx)); - let load = |i, scalar: &layout::Scalar, align| { - let llptr = bx.struct_gep(self.llval, i as u64); - let load = bx.load(llptr, align); - scalar_load_metadata(load, scalar); - if scalar.is_bool() { - bx.trunc(load, Type::i1(bx.cx)) - } else { - load - } - }; - OperandValue::Pair( - load(0, a, self.align), - load(1, b, self.align.restrict_for_offset(b_offset)), - ) - } else { - OperandValue::Ref(self.llval, None, self.align) - }; - - OperandRef { val, layout: self.layout } - } +} +impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> { /// Access a field, at a point when the value's case is known. - pub fn project_field(self, bx: &Builder<'a, 'll, 'tcx>, ix: usize) -> PlaceRef<'ll, 'tcx> { - let cx = bx.cx; - let field = self.layout.field(cx, ix); + pub fn project_field>( + self, bx: &mut Bx, + ix: usize, + ) -> Self { + let field = self.layout.field(bx.cx(), ix); let offset = self.layout.fields.offset(ix); let effective_field_align = self.align.restrict_for_offset(offset); - let simple = || { + let mut simple = || { // Unions and newtypes only use an offset of 0. 
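// Illustrative sketch (stand-alone): reusing the base pointer unchanged for
// unions and newtypes works because their fields all live at byte offset 0
// within the parent, as the preceding comment notes.
#[allow(dead_code)]
union Bits {
    raw: u32,
    bytes: [u8; 4],
}

struct Newtype(u64);

fn main() {
    let n = Newtype(5);
    let base = &n as *const Newtype as usize;
    let field = &n.0 as *const u64 as usize;
    assert_eq!(field - base, 0); // the newtype's field starts at offset 0

    let u = Bits { raw: 0x0102_0304 };
    let base = &u as *const Bits as usize;
    let field = unsafe { &u.raw as *const u32 as usize };
    assert_eq!(field - base, 0); // union fields all start at offset 0
}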
let llval = if offset.bytes() == 0 { self.llval } else if let layout::Abi::ScalarPair(ref a, ref b) = self.layout.abi { // Offsets have to match either first or second field. - assert_eq!(offset, a.value.size(cx).abi_align(b.value.align(cx))); + assert_eq!(offset, a.value.size(bx.cx()).align_to(b.value.align(bx.cx()).abi)); bx.struct_gep(self.llval, 1) } else { - bx.struct_gep(self.llval, self.layout.llvm_field_index(ix)) + bx.struct_gep(self.llval, bx.cx().backend_field_index(self.layout, ix)) }; PlaceRef { // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types. - llval: bx.pointercast(llval, field.llvm_type(cx).ptr_to()), - llextra: if cx.type_has_metadata(field.ty) { + llval: bx.pointercast(llval, bx.cx().type_ptr_to(bx.cx().backend_type(field))), + llextra: if bx.cx().type_has_metadata(field.ty) { self.llextra } else { None @@ -219,7 +143,7 @@ impl PlaceRef<'ll, 'tcx> { if def.repr.packed() { // FIXME(eddyb) generalize the adjustment when we // start supporting packing to larger alignments. - assert_eq!(self.layout.align.abi(), 1); + assert_eq!(self.layout.align.abi.bytes(), 1); return simple(); } } @@ -243,7 +167,7 @@ impl PlaceRef<'ll, 'tcx> { let meta = self.llextra; - let unaligned_offset = C_usize(cx, offset.bytes()); + let unaligned_offset = bx.cx().const_usize(offset.bytes()); // Get the alignment of the field let (_, unsized_align) = glue::size_and_align_of_dst(bx, field.ty, meta); @@ -254,22 +178,23 @@ impl PlaceRef<'ll, 'tcx> { // (unaligned offset + (align - 1)) & -align // Calculate offset - let align_sub_1 = bx.sub(unsized_align, C_usize(cx, 1u64)); - let offset = bx.and(bx.add(unaligned_offset, align_sub_1), - bx.neg(unsized_align)); + let align_sub_1 = bx.sub(unsized_align, bx.cx().const_usize(1u64)); + let and_lhs = bx.add(unaligned_offset, align_sub_1); + let and_rhs = bx.neg(unsized_align); + let offset = bx.and(and_lhs, and_rhs); debug!("struct_field_ptr: DST field offset: {:?}", offset); // Cast and adjust pointer - let byte_ptr = bx.pointercast(self.llval, Type::i8p(cx)); + let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p()); let byte_ptr = bx.gep(byte_ptr, &[offset]); // Finally, cast back to the type expected - let ll_fty = field.llvm_type(cx); + let ll_fty = bx.cx().backend_type(field); debug!("struct_field_ptr: Field type is {:?}", ll_fty); PlaceRef { - llval: bx.pointercast(byte_ptr, ll_fty.ptr_to()), + llval: bx.pointercast(byte_ptr, bx.cx().type_ptr_to(ll_fty)), llextra: self.llextra, layout: field, align: effective_field_align, @@ -277,24 +202,28 @@ impl PlaceRef<'ll, 'tcx> { } /// Obtain the actual discriminant of a value. - pub fn codegen_get_discr(self, bx: &Builder<'a, 'll, 'tcx>, cast_to: Ty<'tcx>) -> &'ll Value { - let cast_to = bx.cx.layout_of(cast_to).immediate_llvm_type(bx.cx); + pub fn codegen_get_discr>( + self, + bx: &mut Bx, + cast_to: Ty<'tcx> + ) -> V { + let cast_to = bx.cx().immediate_backend_type(bx.cx().layout_of(cast_to)); if self.layout.abi.is_uninhabited() { - return C_undef(cast_to); + return bx.cx().const_undef(cast_to); } match self.layout.variants { layout::Variants::Single { index } => { let discr_val = self.layout.ty.ty_adt_def().map_or( - index as u128, - |def| def.discriminant_for_variant(bx.cx.tcx, index).val); - return C_uint_big(cast_to, discr_val); + index.as_u32() as u128, + |def| def.discriminant_for_variant(bx.cx().tcx(), index).val); + return bx.cx().const_uint_big(cast_to, discr_val); } layout::Variants::Tagged { .. } | layout::Variants::NicheFilling { .. 
} => {}, } let discr = self.project_field(bx, 0); - let lldiscr = discr.load(bx).immediate(); + let lldiscr = bx.load_operand(discr).immediate(); match self.layout.variants { layout::Variants::Single { .. } => bug!(), layout::Variants::Tagged { ref tag, .. } => { @@ -314,26 +243,30 @@ impl PlaceRef<'ll, 'tcx> { niche_start, .. } => { - let niche_llty = discr.layout.immediate_llvm_type(bx.cx); + let niche_llty = bx.cx().immediate_backend_type(discr.layout); if niche_variants.start() == niche_variants.end() { // FIXME(eddyb) Check the actual primitive type here. let niche_llval = if niche_start == 0 { - // HACK(eddyb) Using `C_null` as it works on all types. - C_null(niche_llty) + // HACK(eddyb) Using `c_null` as it works on all types. + bx.cx().const_null(niche_llty) } else { - C_uint_big(niche_llty, niche_start) + bx.cx().const_uint_big(niche_llty, niche_start) }; - bx.select(bx.icmp(llvm::IntEQ, lldiscr, niche_llval), - C_uint(cast_to, *niche_variants.start() as u64), - C_uint(cast_to, dataful_variant as u64)) + let select_arg = bx.icmp(IntPredicate::IntEQ, lldiscr, niche_llval); + bx.select(select_arg, + bx.cx().const_uint(cast_to, niche_variants.start().as_u32() as u64), + bx.cx().const_uint(cast_to, dataful_variant.as_u32() as u64)) } else { // Rebase from niche values to discriminant values. - let delta = niche_start.wrapping_sub(*niche_variants.start() as u128); - let lldiscr = bx.sub(lldiscr, C_uint_big(niche_llty, delta)); - let lldiscr_max = C_uint(niche_llty, *niche_variants.end() as u64); - bx.select(bx.icmp(llvm::IntULE, lldiscr, lldiscr_max), - bx.intcast(lldiscr, cast_to, false), - C_uint(cast_to, dataful_variant as u64)) + let delta = niche_start.wrapping_sub(niche_variants.start().as_u32() as u128); + let lldiscr = bx.sub(lldiscr, bx.cx().const_uint_big(niche_llty, delta)); + let lldiscr_max = + bx.cx().const_uint(niche_llty, niche_variants.end().as_u32() as u64); + let select_arg = bx.icmp(IntPredicate::IntULE, lldiscr, lldiscr_max); + let cast = bx.intcast(lldiscr, cast_to, false); + bx.select(select_arg, + cast, + bx.cx().const_uint(cast_to, dataful_variant.as_u32() as u64)) } } } @@ -341,8 +274,12 @@ impl PlaceRef<'ll, 'tcx> { /// Set the discriminant for a new value of the given case of the given /// representation. - pub fn codegen_set_discr(&self, bx: &Builder<'a, 'll, 'tcx>, variant_index: usize) { - if self.layout.for_variant(bx.cx, variant_index).abi.is_uninhabited() { + pub fn codegen_set_discr>( + &self, + bx: &mut Bx, + variant_index: VariantIdx + ) { + if self.layout.for_variant(bx.cx(), variant_index).abi.is_uninhabited() { return; } match self.layout.variants { @@ -355,7 +292,7 @@ impl PlaceRef<'ll, 'tcx> { .discriminant_for_variant(bx.tcx(), variant_index) .val; bx.store( - C_uint_big(ptr.layout.llvm_type(bx.cx), to), + bx.cx().const_uint_big(bx.cx().backend_type(ptr.layout), to), ptr.llval, ptr.align); } @@ -366,28 +303,26 @@ impl PlaceRef<'ll, 'tcx> { .. } => { if variant_index != dataful_variant { - if bx.sess().target.target.arch == "arm" || - bx.sess().target.target.arch == "aarch64" { + if bx.cx().sess().target.target.arch == "arm" || + bx.cx().sess().target.target.arch == "aarch64" { // Issue #34427: As workaround for LLVM bug on ARM, // use memset of 0 before assigning niche value. 
- let llptr = bx.pointercast(self.llval, Type::i8(bx.cx).ptr_to()); - let fill_byte = C_u8(bx.cx, 0); - let (size, align) = self.layout.size_and_align(); - let size = C_usize(bx.cx, size.bytes()); - let align = C_u32(bx.cx, align.abi() as u32); - base::call_memset(bx, llptr, fill_byte, size, align, false); + let fill_byte = bx.cx().const_u8(0); + let size = bx.cx().const_usize(self.layout.size.bytes()); + bx.memset(self.llval, fill_byte, size, self.align, MemFlags::empty()); } let niche = self.project_field(bx, 0); - let niche_llty = niche.layout.immediate_llvm_type(bx.cx); - let niche_value = ((variant_index - *niche_variants.start()) as u128) + let niche_llty = bx.cx().immediate_backend_type(niche.layout); + let niche_value = variant_index.as_u32() - niche_variants.start().as_u32(); + let niche_value = (niche_value as u128) .wrapping_add(niche_start); // FIXME(eddyb) Check the actual primitive type here. let niche_llval = if niche_value == 0 { - // HACK(eddyb) Using `C_null` as it works on all types. - C_null(niche_llty) + // HACK(eddyb) Using `c_null` as it works on all types. + bx.cx().const_null(niche_llty) } else { - C_uint_big(niche_llty, niche_value) + bx.cx().const_uint_big(niche_llty, niche_value) }; OperandValue::Immediate(niche_llval).store(bx, niche); } @@ -395,46 +330,53 @@ impl PlaceRef<'ll, 'tcx> { } } - pub fn project_index(&self, bx: &Builder<'a, 'll, 'tcx>, llindex: &'ll Value) - -> PlaceRef<'ll, 'tcx> { + pub fn project_index>( + &self, + bx: &mut Bx, + llindex: V + ) -> Self { PlaceRef { - llval: bx.inbounds_gep(self.llval, &[C_usize(bx.cx, 0), llindex]), + llval: bx.inbounds_gep(self.llval, &[bx.cx().const_usize(0), llindex]), llextra: None, - layout: self.layout.field(bx.cx, 0), + layout: self.layout.field(bx.cx(), 0), align: self.align } } - pub fn project_downcast(&self, bx: &Builder<'a, 'll, 'tcx>, variant_index: usize) - -> PlaceRef<'ll, 'tcx> { + pub fn project_downcast>( + &self, + bx: &mut Bx, + variant_index: VariantIdx + ) -> Self { let mut downcast = *self; - downcast.layout = self.layout.for_variant(bx.cx, variant_index); + downcast.layout = self.layout.for_variant(bx.cx(), variant_index); // Cast to the appropriate variant struct type. - let variant_ty = downcast.layout.llvm_type(bx.cx); - downcast.llval = bx.pointercast(downcast.llval, variant_ty.ptr_to()); + let variant_ty = bx.cx().backend_type(downcast.layout); + downcast.llval = bx.pointercast(downcast.llval, bx.cx().type_ptr_to(variant_ty)); downcast } - pub fn storage_live(&self, bx: &Builder<'a, 'll, 'tcx>) { + pub fn storage_live>(&self, bx: &mut Bx) { bx.lifetime_start(self.llval, self.layout.size); } - pub fn storage_dead(&self, bx: &Builder<'a, 'll, 'tcx>) { + pub fn storage_dead>(&self, bx: &mut Bx) { bx.lifetime_end(self.llval, self.layout.size); } } -impl FunctionCx<'a, 'll, 'tcx> { - pub fn codegen_place(&mut self, - bx: &Builder<'a, 'll, 'tcx>, - place: &mir::Place<'tcx>) - -> PlaceRef<'ll, 'tcx> { +impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { + pub fn codegen_place( + &mut self, + bx: &mut Bx, + place: &mir::Place<'tcx> + ) -> PlaceRef<'tcx, Bx::Value> { debug!("codegen_place(place={:?})", place); - let cx = bx.cx; - let tcx = cx.tcx; + let cx = self.cx; + let tcx = self.cx.tcx(); if let mir::Place::Local(index) = *place { match self.locals[index] { @@ -442,7 +384,7 @@ impl FunctionCx<'a, 'll, 'tcx> { return place; } LocalRef::UnsizedPlace(place) => { - return place.load(bx).deref(&cx); + return bx.load_operand(place).deref(cx); } LocalRef::Operand(..) 
=> { bug!("using operand local {:?} as place", place); @@ -462,7 +404,7 @@ impl FunctionCx<'a, 'll, 'tcx> { match bx.tcx().const_eval(param_env.and(cid)) { Ok(val) => match val.val { mir::interpret::ConstValue::ByRef(_, alloc, offset) => { - PlaceRef::from_const_alloc(bx, layout, alloc, offset) + bx.cx().from_const_alloc(layout, alloc, offset) } _ => bug!("promoteds should have an allocation: {:?}", val), }, @@ -471,23 +413,24 @@ impl FunctionCx<'a, 'll, 'tcx> { // and compile-time agree on values // With floats that won't always be true // so we generate an abort - let fnname = bx.cx.get_intrinsic(&("llvm.trap")); - bx.call(fnname, &[], None); - let llval = C_undef(layout.llvm_type(bx.cx).ptr_to()); - PlaceRef::new_sized(llval, layout, layout.align) + bx.abort(); + let llval = bx.cx().const_undef( + bx.cx().type_ptr_to(bx.cx().backend_type(layout)) + ); + PlaceRef::new_sized(llval, layout, layout.align.abi) } } } mir::Place::Static(box mir::Static { def_id, ty }) => { let layout = cx.layout_of(self.monomorphize(&ty)); - PlaceRef::new_sized(consts::get_static(cx, def_id), layout, layout.align) + PlaceRef::new_sized(bx.get_static(def_id), layout, layout.align.abi) }, mir::Place::Projection(box mir::Projection { ref base, elem: mir::ProjectionElem::Deref }) => { // Load the pointer from its location. - self.codegen_consume(bx, base).deref(bx.cx) + self.codegen_consume(bx, base).deref(bx.cx()) } mir::Place::Projection(ref projection) => { let cg_base = self.codegen_place(bx, &projection.base); @@ -506,34 +449,33 @@ impl FunctionCx<'a, 'll, 'tcx> { mir::ProjectionElem::ConstantIndex { offset, from_end: false, min_length: _ } => { - let lloffset = C_usize(bx.cx, offset as u64); + let lloffset = bx.cx().const_usize(offset as u64); cg_base.project_index(bx, lloffset) } mir::ProjectionElem::ConstantIndex { offset, from_end: true, min_length: _ } => { - let lloffset = C_usize(bx.cx, offset as u64); - let lllen = cg_base.len(bx.cx); + let lloffset = bx.cx().const_usize(offset as u64); + let lllen = cg_base.len(bx.cx()); let llindex = bx.sub(lllen, lloffset); cg_base.project_index(bx, llindex) } mir::ProjectionElem::Subslice { from, to } => { let mut subslice = cg_base.project_index(bx, - C_usize(bx.cx, from as u64)); + bx.cx().const_usize(from as u64)); let projected_ty = PlaceTy::Ty { ty: cg_base.layout.ty } - .projection_ty(tcx, &projection.elem) - .to_ty(bx.tcx()); - subslice.layout = bx.cx.layout_of(self.monomorphize(&projected_ty)); + .projection_ty(tcx, &projection.elem).to_ty(tcx); + subslice.layout = bx.cx().layout_of(self.monomorphize(&projected_ty)); if subslice.layout.is_unsized() { subslice.llextra = Some(bx.sub(cg_base.llextra.unwrap(), - C_usize(bx.cx, (from as u64) + (to as u64)))); + bx.cx().const_usize((from as u64) + (to as u64)))); } // Cast the place pointer type to the new // array or slice type (*[%_; new_len]). 
subslice.llval = bx.pointercast(subslice.llval, - subslice.layout.llvm_type(bx.cx).ptr_to()); + bx.cx().type_ptr_to(bx.cx().backend_type(subslice.layout))); subslice } @@ -548,7 +490,7 @@ impl FunctionCx<'a, 'll, 'tcx> { } pub fn monomorphized_place_ty(&self, place: &mir::Place<'tcx>) -> Ty<'tcx> { - let tcx = self.cx.tcx; + let tcx = self.cx.tcx(); let place_ty = place.ty(self.mir, tcx); self.monomorphize(&place_ty.to_ty(tcx)) } diff --git a/src/librustc_codegen_llvm/mir/rvalue.rs b/src/librustc_codegen_ssa/mir/rvalue.rs similarity index 66% rename from src/librustc_codegen_llvm/mir/rvalue.rs rename to src/librustc_codegen_ssa/mir/rvalue.rs index fa22bdff94..dc7b1ec37b 100644 --- a/src/librustc_codegen_llvm/mir/rvalue.rs +++ b/src/librustc_codegen_ssa/mir/rvalue.rs @@ -8,57 +8,53 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use llvm; use rustc::ty::{self, Ty}; use rustc::ty::cast::{CastTy, IntTy}; -use rustc::ty::layout::{self, LayoutOf}; +use rustc::ty::layout::{self, LayoutOf, HasTyCtxt}; use rustc::mir; use rustc::middle::lang_items::ExchangeMallocFnLangItem; use rustc_apfloat::{ieee, Float, Status, Round}; use std::{u128, i128}; use base; -use builder::Builder; +use MemFlags; use callee; -use common::{self, val_ty}; -use common::{C_bool, C_u8, C_i32, C_u32, C_u64, C_undef, C_null, C_usize, C_uint, C_uint_big}; -use consts; -use monomorphize; -use type_::Type; -use type_of::LayoutLlvmExt; -use value::Value; +use common::{self, RealPredicate, IntPredicate}; +use rustc_mir::monomorphize; + +use traits::*; use super::{FunctionCx, LocalRef}; use super::operand::{OperandRef, OperandValue}; use super::place::PlaceRef; -impl FunctionCx<'a, 'll, 'tcx> { - pub fn codegen_rvalue(&mut self, - bx: Builder<'a, 'll, 'tcx>, - dest: PlaceRef<'ll, 'tcx>, - rvalue: &mir::Rvalue<'tcx>) - -> Builder<'a, 'll, 'tcx> - { +impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { + pub fn codegen_rvalue( + &mut self, + mut bx: Bx, + dest: PlaceRef<'tcx, Bx::Value>, + rvalue: &mir::Rvalue<'tcx> + ) -> Bx { debug!("codegen_rvalue(dest.llval={:?}, rvalue={:?})", dest.llval, rvalue); match *rvalue { mir::Rvalue::Use(ref operand) => { - let cg_operand = self.codegen_operand(&bx, operand); + let cg_operand = self.codegen_operand(&mut bx, operand); // FIXME: consider not copying constants through stack. (fixable by codegenning // constants into OperandValue::Ref, why don’t we do that yet if we don’t?) - cg_operand.val.store(&bx, dest); + cg_operand.val.store(&mut bx, dest); bx } mir::Rvalue::Cast(mir::CastKind::Unsize, ref source, _) => { // The destination necessarily contains a fat pointer, so if // it's a scalar pair, it's a fat pointer or newtype thereof. - if dest.layout.is_llvm_scalar_pair() { + if bx.cx().is_backend_scalar_pair(dest.layout) { // into-coerce of a thin pointer to a fat pointer - just // use the operand path. - let (bx, temp) = self.codegen_rvalue_operand(bx, rvalue); - temp.val.store(&bx, dest); + let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue); + temp.val.store(&mut bx, dest); return bx; } @@ -66,7 +62,7 @@ impl FunctionCx<'a, 'll, 'tcx> { // this to be eliminated by MIR building, but // `CoerceUnsized` can be passed by a where-clause, // so the (generic) MIR may not be able to expand it. - let operand = self.codegen_operand(&bx, source); + let operand = self.codegen_operand(&mut bx, source); match operand.val { OperandValue::Pair(..) 
| OperandValue::Immediate(_) => { @@ -77,15 +73,15 @@ impl FunctionCx<'a, 'll, 'tcx> { // index into the struct, and this case isn't // important enough for it. debug!("codegen_rvalue: creating ugly alloca"); - let scratch = PlaceRef::alloca(&bx, operand.layout, "__unsize_temp"); - scratch.storage_live(&bx); - operand.val.store(&bx, scratch); - base::coerce_unsized_into(&bx, scratch, dest); - scratch.storage_dead(&bx); + let scratch = PlaceRef::alloca(&mut bx, operand.layout, "__unsize_temp"); + scratch.storage_live(&mut bx); + operand.val.store(&mut bx, scratch); + base::coerce_unsized_into(&mut bx, scratch, dest); + scratch.storage_dead(&mut bx); } OperandValue::Ref(llref, None, align) => { let source = PlaceRef::new_sized(llref, operand.layout, align); - base::coerce_unsized_into(&bx, source, dest); + base::coerce_unsized_into(&mut bx, source, dest); } OperandValue::Ref(_, Some(_), _) => { bug!("unsized coercion on an unsized rvalue") @@ -95,51 +91,50 @@ impl FunctionCx<'a, 'll, 'tcx> { } mir::Rvalue::Repeat(ref elem, count) => { - let cg_elem = self.codegen_operand(&bx, elem); + let cg_elem = self.codegen_operand(&mut bx, elem); // Do not generate the loop for zero-sized elements or empty arrays. if dest.layout.is_zst() { return bx; } - - let start = dest.project_index(&bx, C_usize(bx.cx, 0)).llval; + let zero = bx.cx().const_usize(0); + let start = dest.project_index(&mut bx, zero).llval; if let OperandValue::Immediate(v) = cg_elem.val { - let align = C_i32(bx.cx, dest.align.abi() as i32); - let size = C_usize(bx.cx, dest.layout.size.bytes()); + let size = bx.cx().const_usize(dest.layout.size.bytes()); // Use llvm.memset.p0i8.* to initialize all zero arrays - if common::is_const_integral(v) && common::const_to_uint(v) == 0 { - let fill = C_u8(bx.cx, 0); - base::call_memset(&bx, start, fill, size, align, false); + if bx.cx().is_const_integral(v) && bx.cx().const_to_uint(v) == 0 { + let fill = bx.cx().const_u8(0); + bx.memset(start, fill, size, dest.align, MemFlags::empty()); return bx; } // Use llvm.memset.p0i8.* to initialize byte arrays - let v = base::from_immediate(&bx, v); - if common::val_ty(v) == Type::i8(bx.cx) { - base::call_memset(&bx, start, v, size, align, false); + let v = base::from_immediate(&mut bx, v); + if bx.cx().val_ty(v) == bx.cx().type_i8() { + bx.memset(start, v, size, dest.align, MemFlags::empty()); return bx; } } - let count = C_usize(bx.cx, count); - let end = dest.project_index(&bx, count).llval; + let count = bx.cx().const_usize(count); + let end = dest.project_index(&mut bx, count).llval; - let header_bx = bx.build_sibling_block("repeat_loop_header"); - let body_bx = bx.build_sibling_block("repeat_loop_body"); + let mut header_bx = bx.build_sibling_block("repeat_loop_header"); + let mut body_bx = bx.build_sibling_block("repeat_loop_body"); let next_bx = bx.build_sibling_block("repeat_loop_next"); bx.br(header_bx.llbb()); - let current = header_bx.phi(common::val_ty(start), &[start], &[bx.llbb()]); + let current = header_bx.phi(bx.cx().val_ty(start), &[start], &[bx.llbb()]); - let keep_going = header_bx.icmp(llvm::IntNE, current, end); + let keep_going = header_bx.icmp(IntPredicate::IntNE, current, end); header_bx.cond_br(keep_going, body_bx.llbb(), next_bx.llbb()); - cg_elem.val.store(&body_bx, + cg_elem.val.store(&mut body_bx, PlaceRef::new_sized(current, cg_elem.layout, dest.align)); - let next = body_bx.inbounds_gep(current, &[C_usize(bx.cx, 1)]); + let next = body_bx.inbounds_gep(current, &[bx.cx().const_usize(1)]); body_bx.br(header_bx.llbb()); 
header_bx.add_incoming_to_phi(current, next, body_bx.llbb()); @@ -149,9 +144,9 @@ impl FunctionCx<'a, 'll, 'tcx> { mir::Rvalue::Aggregate(ref kind, ref operands) => { let (dest, active_field_index) = match **kind { mir::AggregateKind::Adt(adt_def, variant_index, _, _, active_field_index) => { - dest.codegen_set_discr(&bx, variant_index); + dest.codegen_set_discr(&mut bx, variant_index); if adt_def.is_enum() { - (dest.project_downcast(&bx, variant_index), active_field_index) + (dest.project_downcast(&mut bx, variant_index), active_field_index) } else { (dest, active_field_index) } @@ -159,11 +154,12 @@ impl FunctionCx<'a, 'll, 'tcx> { _ => (dest, None) }; for (i, operand) in operands.iter().enumerate() { - let op = self.codegen_operand(&bx, operand); + let op = self.codegen_operand(&mut bx, operand); // Do not generate stores and GEPis for zero-sized fields. if !op.layout.is_zst() { let field_index = active_field_index.unwrap_or(i); - op.val.store(&bx, dest.project_field(&bx, field_index)); + let field = dest.project_field(&mut bx, field_index); + op.val.store(&mut bx, field); } } bx @@ -171,26 +167,26 @@ impl FunctionCx<'a, 'll, 'tcx> { _ => { assert!(self.rvalue_creates_operand(rvalue)); - let (bx, temp) = self.codegen_rvalue_operand(bx, rvalue); - temp.val.store(&bx, dest); + let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue); + temp.val.store(&mut bx, dest); bx } } } - pub fn codegen_rvalue_unsized(&mut self, - bx: Builder<'a, 'll, 'tcx>, - indirect_dest: PlaceRef<'ll, 'tcx>, - rvalue: &mir::Rvalue<'tcx>) - -> Builder<'a, 'll, 'tcx> - { + pub fn codegen_rvalue_unsized( + &mut self, + mut bx: Bx, + indirect_dest: PlaceRef<'tcx, Bx::Value>, + rvalue: &mir::Rvalue<'tcx>, + ) -> Bx { debug!("codegen_rvalue_unsized(indirect_dest.llval={:?}, rvalue={:?})", indirect_dest.llval, rvalue); match *rvalue { mir::Rvalue::Use(ref operand) => { - let cg_operand = self.codegen_operand(&bx, operand); - cg_operand.val.store_unsized(&bx, indirect_dest); + let cg_operand = self.codegen_operand(&mut bx, operand); + cg_operand.val.store_unsized(&mut bx, indirect_dest); bx } @@ -198,29 +194,29 @@ impl FunctionCx<'a, 'll, 'tcx> { } } - pub fn codegen_rvalue_operand(&mut self, - bx: Builder<'a, 'll, 'tcx>, - rvalue: &mir::Rvalue<'tcx>) - -> (Builder<'a, 'll, 'tcx>, OperandRef<'ll, 'tcx>) - { + pub fn codegen_rvalue_operand( + &mut self, + mut bx: Bx, + rvalue: &mir::Rvalue<'tcx> + ) -> (Bx, OperandRef<'tcx, Bx::Value>) { assert!(self.rvalue_creates_operand(rvalue), "cannot codegen {:?} to operand", rvalue); match *rvalue { mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => { - let operand = self.codegen_operand(&bx, source); + let operand = self.codegen_operand(&mut bx, source); debug!("cast operand is {:?}", operand); - let cast = bx.cx.layout_of(self.monomorphize(&mir_cast_ty)); + let cast = bx.cx().layout_of(self.monomorphize(&mir_cast_ty)); let val = match *kind { mir::CastKind::ReifyFnPointer => { match operand.layout.ty.sty { ty::FnDef(def_id, substs) => { - if bx.cx.tcx.has_attr(def_id, "rustc_args_required_const") { + if bx.cx().tcx().has_attr(def_id, "rustc_args_required_const") { bug!("reifying a fn ptr that requires \ const arguments"); } OperandValue::Immediate( - callee::resolve_and_get_fn(bx.cx, def_id, substs)) + callee::resolve_and_get_fn(bx.cx(), def_id, substs)) } _ => { bug!("{} cannot be reified to a fn ptr", operand.layout.ty) @@ -231,8 +227,8 @@ impl FunctionCx<'a, 'll, 'tcx> { match operand.layout.ty.sty { ty::Closure(def_id, substs) => { let instance = 
monomorphize::resolve_closure( - bx.cx.tcx, def_id, substs, ty::ClosureKind::FnOnce); - OperandValue::Immediate(callee::get_fn(bx.cx, instance)) + bx.cx().tcx(), def_id, substs, ty::ClosureKind::FnOnce); + OperandValue::Immediate(bx.cx().get_fn(instance)) } _ => { bug!("{} cannot be cast to a fn ptr", operand.layout.ty) @@ -244,7 +240,7 @@ impl FunctionCx<'a, 'll, 'tcx> { operand.val } mir::CastKind::Unsize => { - assert!(cast.is_llvm_scalar_pair()); + assert!(bx.cx().is_backend_scalar_pair(cast)); match operand.val { OperandValue::Pair(lldata, llextra) => { // unsize from a fat pointer - this is a @@ -255,12 +251,12 @@ impl FunctionCx<'a, 'll, 'tcx> { // HACK(eddyb) have to bitcast pointers // until LLVM removes pointee types. let lldata = bx.pointercast(lldata, - cast.scalar_pair_element_llvm_type(bx.cx, 0, true)); + bx.cx().scalar_pair_element_backend_type(cast, 0, true)); OperandValue::Pair(lldata, llextra) } OperandValue::Immediate(lldata) => { // "standard" unsize - let (lldata, llextra) = base::unsize_thin_ptr(&bx, lldata, + let (lldata, llextra) = base::unsize_thin_ptr(&mut bx, lldata, operand.layout.ty, cast.ty); OperandValue::Pair(lldata, llextra) } @@ -270,16 +266,16 @@ impl FunctionCx<'a, 'll, 'tcx> { } } } - mir::CastKind::Misc if operand.layout.is_llvm_scalar_pair() => { + mir::CastKind::Misc if bx.cx().is_backend_scalar_pair(operand.layout) => { if let OperandValue::Pair(data_ptr, meta) = operand.val { - if cast.is_llvm_scalar_pair() { + if bx.cx().is_backend_scalar_pair(cast) { let data_cast = bx.pointercast(data_ptr, - cast.scalar_pair_element_llvm_type(bx.cx, 0, true)); + bx.cx().scalar_pair_element_backend_type(cast, 0, true)); OperandValue::Pair(data_cast, meta) } else { // cast to thin-ptr // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and // pointer-cast of that pointer to desired pointer type. - let llcast_ty = cast.immediate_llvm_type(bx.cx); + let llcast_ty = bx.cx().immediate_backend_type(cast); let llval = bx.pointercast(data_ptr, llcast_ty); OperandValue::Immediate(llval) } @@ -288,25 +284,26 @@ impl FunctionCx<'a, 'll, 'tcx> { } } mir::CastKind::Misc => { - assert!(cast.is_llvm_immediate()); - let ll_t_out = cast.immediate_llvm_type(bx.cx); + assert!(bx.cx().is_backend_immediate(cast)); + let ll_t_out = bx.cx().immediate_backend_type(cast); if operand.layout.abi.is_uninhabited() { + let val = OperandValue::Immediate(bx.cx().const_undef(ll_t_out)); return (bx, OperandRef { - val: OperandValue::Immediate(C_undef(ll_t_out)), + val, layout: cast, }); } let r_t_in = CastTy::from_ty(operand.layout.ty) .expect("bad input type for cast"); let r_t_out = CastTy::from_ty(cast.ty).expect("bad output type for cast"); - let ll_t_in = operand.layout.immediate_llvm_type(bx.cx); + let ll_t_in = bx.cx().immediate_backend_type(operand.layout); match operand.layout.variants { layout::Variants::Single { index } => { if let Some(def) = operand.layout.ty.ty_adt_def() { let discr_val = def - .discriminant_for_variant(bx.cx.tcx, index) + .discriminant_for_variant(bx.cx().tcx(), index) .val; - let discr = C_uint_big(ll_t_out, discr_val); + let discr = bx.cx().const_uint_big(ll_t_out, discr_val); return (bx, OperandRef { val: OperandValue::Immediate(discr), layout: cast, @@ -327,18 +324,20 @@ impl FunctionCx<'a, 'll, 'tcx> { // then `i1 1` (i.e. E::B) is effectively `i8 -1`. 
signed = !scalar.is_bool() && s; - let er = scalar.valid_range_exclusive(bx.cx); + let er = scalar.valid_range_exclusive(bx.cx()); if er.end != er.start && scalar.valid_range.end() > scalar.valid_range.start() { // We want `table[e as usize]` to not // have bound checks, and this is the most // convenient place to put the `assume`. - - base::call_assume(&bx, bx.icmp( - llvm::IntULE, + let ll_t_in_const = + bx.cx().const_uint_big(ll_t_in, *scalar.valid_range.end()); + let cmp = bx.icmp( + IntPredicate::IntULE, llval, - C_uint_big(ll_t_in, *scalar.valid_range.end()) - )); + ll_t_in_const + ); + bx.assume(cmp); } } } @@ -348,8 +347,8 @@ impl FunctionCx<'a, 'll, 'tcx> { bx.intcast(llval, ll_t_out, signed) } (CastTy::Float, CastTy::Float) => { - let srcsz = ll_t_in.float_width(); - let dstsz = ll_t_out.float_width(); + let srcsz = bx.cx().float_width(ll_t_in); + let dstsz = bx.cx().float_width(ll_t_out); if dstsz > srcsz { bx.fpext(llval, ll_t_out) } else if srcsz > dstsz { @@ -366,15 +365,15 @@ impl FunctionCx<'a, 'll, 'tcx> { (CastTy::FnPtr, CastTy::Int(_)) => bx.ptrtoint(llval, ll_t_out), (CastTy::Int(_), CastTy::Ptr(_)) => { - let usize_llval = bx.intcast(llval, bx.cx.isize_ty, signed); + let usize_llval = bx.intcast(llval, bx.cx().type_isize(), signed); bx.inttoptr(usize_llval, ll_t_out) } (CastTy::Int(_), CastTy::Float) => - cast_int_to_float(&bx, signed, llval, ll_t_in, ll_t_out), + cast_int_to_float(&mut bx, signed, llval, ll_t_in, ll_t_out), (CastTy::Float, CastTy::Int(IntTy::I)) => - cast_float_to_int(&bx, true, llval, ll_t_in, ll_t_out), + cast_float_to_int(&mut bx, true, llval, ll_t_in, ll_t_out), (CastTy::Float, CastTy::Int(_)) => - cast_float_to_int(&bx, false, llval, ll_t_in, ll_t_out), + cast_float_to_int(&mut bx, false, llval, ll_t_in, ll_t_out), _ => bug!("unsupported cast: {:?} to {:?}", operand.layout.ty, cast.ty) }; OperandValue::Immediate(newval) @@ -387,42 +386,42 @@ impl FunctionCx<'a, 'll, 'tcx> { } mir::Rvalue::Ref(_, bk, ref place) => { - let cg_place = self.codegen_place(&bx, place); + let cg_place = self.codegen_place(&mut bx, place); let ty = cg_place.layout.ty; // Note: places are indirect, so storing the `llval` into the // destination effectively creates a reference. 
- let val = if !bx.cx.type_has_metadata(ty) { + let val = if !bx.cx().type_has_metadata(ty) { OperandValue::Immediate(cg_place.llval) } else { OperandValue::Pair(cg_place.llval, cg_place.llextra.unwrap()) }; (bx, OperandRef { val, - layout: self.cx.layout_of(self.cx.tcx.mk_ref( - self.cx.tcx.types.re_erased, + layout: self.cx.layout_of(self.cx.tcx().mk_ref( + self.cx.tcx().types.re_erased, ty::TypeAndMut { ty, mutbl: bk.to_mutbl_lossy() } )), }) } mir::Rvalue::Len(ref place) => { - let size = self.evaluate_array_len(&bx, place); + let size = self.evaluate_array_len(&mut bx, place); let operand = OperandRef { val: OperandValue::Immediate(size), - layout: bx.cx.layout_of(bx.tcx().types.usize), + layout: bx.cx().layout_of(bx.tcx().types.usize), }; (bx, operand) } mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => { - let lhs = self.codegen_operand(&bx, lhs); - let rhs = self.codegen_operand(&bx, rhs); + let lhs = self.codegen_operand(&mut bx, lhs); + let rhs = self.codegen_operand(&mut bx, rhs); let llresult = match (lhs.val, rhs.val) { (OperandValue::Pair(lhs_addr, lhs_extra), OperandValue::Pair(rhs_addr, rhs_extra)) => { - self.codegen_fat_ptr_binop(&bx, op, + self.codegen_fat_ptr_binop(&mut bx, op, lhs_addr, lhs_extra, rhs_addr, rhs_extra, lhs.layout.ty) @@ -430,36 +429,36 @@ impl FunctionCx<'a, 'll, 'tcx> { (OperandValue::Immediate(lhs_val), OperandValue::Immediate(rhs_val)) => { - self.codegen_scalar_binop(&bx, op, lhs_val, rhs_val, lhs.layout.ty) + self.codegen_scalar_binop(&mut bx, op, lhs_val, rhs_val, lhs.layout.ty) } _ => bug!() }; let operand = OperandRef { val: OperandValue::Immediate(llresult), - layout: bx.cx.layout_of( + layout: bx.cx().layout_of( op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty)), }; (bx, operand) } mir::Rvalue::CheckedBinaryOp(op, ref lhs, ref rhs) => { - let lhs = self.codegen_operand(&bx, lhs); - let rhs = self.codegen_operand(&bx, rhs); - let result = self.codegen_scalar_checked_binop(&bx, op, + let lhs = self.codegen_operand(&mut bx, lhs); + let rhs = self.codegen_operand(&mut bx, rhs); + let result = self.codegen_scalar_checked_binop(&mut bx, op, lhs.immediate(), rhs.immediate(), lhs.layout.ty); let val_ty = op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty); let operand_ty = bx.tcx().intern_tup(&[val_ty, bx.tcx().types.bool]); let operand = OperandRef { val: result, - layout: bx.cx.layout_of(operand_ty) + layout: bx.cx().layout_of(operand_ty) }; (bx, operand) } mir::Rvalue::UnaryOp(op, ref operand) => { - let operand = self.codegen_operand(&bx, operand); + let operand = self.codegen_operand(&mut bx, operand); let lloperand = operand.immediate(); let is_float = operand.layout.ty.is_fp(); let llval = match op { @@ -478,8 +477,8 @@ impl FunctionCx<'a, 'll, 'tcx> { mir::Rvalue::Discriminant(ref place) => { let discr_ty = rvalue.ty(&*self.mir, bx.tcx()); - let discr = self.codegen_place(&bx, place) - .codegen_get_discr(&bx, discr_ty); + let discr = self.codegen_place(&mut bx, place) + .codegen_get_discr(&mut bx, discr_ty); (bx, OperandRef { val: OperandValue::Immediate(discr), layout: self.cx.layout_of(discr_ty) @@ -487,9 +486,9 @@ impl FunctionCx<'a, 'll, 'tcx> { } mir::Rvalue::NullaryOp(mir::NullOp::SizeOf, ty) => { - assert!(bx.cx.type_is_sized(ty)); - let val = C_usize(bx.cx, bx.cx.size_of(ty).bytes()); - let tcx = bx.tcx(); + assert!(bx.cx().type_is_sized(ty)); + let val = bx.cx().const_usize(bx.cx().layout_of(ty).size.bytes()); + let tcx = self.cx.tcx(); (bx, OperandRef { val: OperandValue::Immediate(val), layout: self.cx.layout_of(tcx.types.usize), @@ -497,23 
+496,24 @@ impl FunctionCx<'a, 'll, 'tcx> { } mir::Rvalue::NullaryOp(mir::NullOp::Box, content_ty) => { - let content_ty: Ty<'tcx> = self.monomorphize(&content_ty); - let (size, align) = bx.cx.size_and_align_of(content_ty); - let llsize = C_usize(bx.cx, size.bytes()); - let llalign = C_usize(bx.cx, align.abi()); - let box_layout = bx.cx.layout_of(bx.tcx().mk_box(content_ty)); - let llty_ptr = box_layout.llvm_type(bx.cx); + let content_ty = self.monomorphize(&content_ty); + let content_layout = bx.cx().layout_of(content_ty); + let llsize = bx.cx().const_usize(content_layout.size.bytes()); + let llalign = bx.cx().const_usize(content_layout.align.abi.bytes()); + let box_layout = bx.cx().layout_of(bx.tcx().mk_box(content_ty)); + let llty_ptr = bx.cx().backend_type(box_layout); // Allocate space: let def_id = match bx.tcx().lang_items().require(ExchangeMallocFnLangItem) { Ok(id) => id, Err(s) => { - bx.sess().fatal(&format!("allocation of `{}` {}", box_layout.ty, s)); + bx.cx().sess().fatal(&format!("allocation of `{}` {}", box_layout.ty, s)); } }; let instance = ty::Instance::mono(bx.tcx(), def_id); - let r = callee::get_fn(bx.cx, instance); - let val = bx.pointercast(bx.call(r, &[llsize, llalign], None), llty_ptr); + let r = bx.cx().get_fn(instance); + let call = bx.call(r, &[llsize, llalign], None); + let val = bx.pointercast(call, llty_ptr); let operand = OperandRef { val: OperandValue::Immediate(val), @@ -522,14 +522,14 @@ impl FunctionCx<'a, 'll, 'tcx> { (bx, operand) } mir::Rvalue::Use(ref operand) => { - let operand = self.codegen_operand(&bx, operand); + let operand = self.codegen_operand(&mut bx, operand); (bx, operand) } mir::Rvalue::Repeat(..) | mir::Rvalue::Aggregate(..) => { // According to `rvalue_creates_operand`, only ZST // aggregate rvalues are allowed to be operands. - let ty = rvalue.ty(self.mir, self.cx.tcx); + let ty = rvalue.ty(self.mir, self.cx.tcx()); (bx, OperandRef::new_zst(self.cx, self.cx.layout_of(self.monomorphize(&ty)))) } @@ -538,32 +538,32 @@ impl FunctionCx<'a, 'll, 'tcx> { fn evaluate_array_len( &mut self, - bx: &Builder<'a, 'll, 'tcx>, + bx: &mut Bx, place: &mir::Place<'tcx>, - ) -> &'ll Value { + ) -> Bx::Value { // ZST are passed as operands and require special handling // because codegen_place() panics if Local is operand. 
if let mir::Place::Local(index) = *place { if let LocalRef::Operand(Some(op)) = self.locals[index] { if let ty::Array(_, n) = op.layout.ty.sty { - let n = n.unwrap_usize(bx.cx.tcx); - return common::C_usize(bx.cx, n); + let n = n.unwrap_usize(bx.cx().tcx()); + return bx.cx().const_usize(n); } } } // use common size calculation for non zero-sized types - let cg_value = self.codegen_place(&bx, place); - return cg_value.len(bx.cx); + let cg_value = self.codegen_place(bx, place); + return cg_value.len(bx.cx()); } pub fn codegen_scalar_binop( &mut self, - bx: &Builder<'a, 'll, 'tcx>, + bx: &mut Bx, op: mir::BinOp, - lhs: &'ll Value, - rhs: &'ll Value, + lhs: Bx::Value, + rhs: Bx::Value, input_ty: Ty<'tcx>, - ) -> &'ll Value { + ) -> Bx::Value { let is_float = input_ty.is_fp(); let is_signed = input_ty.is_signed(); let is_unit = input_ty.is_unit(); @@ -605,7 +605,7 @@ impl FunctionCx<'a, 'll, 'tcx> { mir::BinOp::Shr => common::build_unchecked_rshift(bx, input_ty, lhs, rhs), mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt | mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => if is_unit { - C_bool(bx.cx, match op { + bx.cx().const_bool(match op { mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt => false, mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => true, _ => unreachable!() @@ -626,45 +626,40 @@ impl FunctionCx<'a, 'll, 'tcx> { pub fn codegen_fat_ptr_binop( &mut self, - bx: &Builder<'a, 'll, 'tcx>, + bx: &mut Bx, op: mir::BinOp, - lhs_addr: &'ll Value, - lhs_extra: &'ll Value, - rhs_addr: &'ll Value, - rhs_extra: &'ll Value, + lhs_addr: Bx::Value, + lhs_extra: Bx::Value, + rhs_addr: Bx::Value, + rhs_extra: Bx::Value, _input_ty: Ty<'tcx>, - ) -> &'ll Value { + ) -> Bx::Value { match op { mir::BinOp::Eq => { - bx.and( - bx.icmp(llvm::IntEQ, lhs_addr, rhs_addr), - bx.icmp(llvm::IntEQ, lhs_extra, rhs_extra) - ) + let lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr); + let rhs = bx.icmp(IntPredicate::IntEQ, lhs_extra, rhs_extra); + bx.and(lhs, rhs) } mir::BinOp::Ne => { - bx.or( - bx.icmp(llvm::IntNE, lhs_addr, rhs_addr), - bx.icmp(llvm::IntNE, lhs_extra, rhs_extra) - ) + let lhs = bx.icmp(IntPredicate::IntNE, lhs_addr, rhs_addr); + let rhs = bx.icmp(IntPredicate::IntNE, lhs_extra, rhs_extra); + bx.or(lhs, rhs) } mir::BinOp::Le | mir::BinOp::Lt | mir::BinOp::Ge | mir::BinOp::Gt => { // a OP b ~ a.0 STRICT(OP) b.0 | (a.0 == b.0 && a.1 OP a.1) let (op, strict_op) = match op { - mir::BinOp::Lt => (llvm::IntULT, llvm::IntULT), - mir::BinOp::Le => (llvm::IntULE, llvm::IntULT), - mir::BinOp::Gt => (llvm::IntUGT, llvm::IntUGT), - mir::BinOp::Ge => (llvm::IntUGE, llvm::IntUGT), + mir::BinOp::Lt => (IntPredicate::IntULT, IntPredicate::IntULT), + mir::BinOp::Le => (IntPredicate::IntULE, IntPredicate::IntULT), + mir::BinOp::Gt => (IntPredicate::IntUGT, IntPredicate::IntUGT), + mir::BinOp::Ge => (IntPredicate::IntUGE, IntPredicate::IntUGT), _ => bug!(), }; - - bx.or( - bx.icmp(strict_op, lhs_addr, rhs_addr), - bx.and( - bx.icmp(llvm::IntEQ, lhs_addr, rhs_addr), - bx.icmp(op, lhs_extra, rhs_extra) - ) - ) + let lhs = bx.icmp(strict_op, lhs_addr, rhs_addr); + let and_lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr); + let and_rhs = bx.icmp(op, lhs_extra, rhs_extra); + let rhs = bx.and(and_lhs, and_rhs); + bx.or(lhs, rhs) } _ => { bug!("unexpected fat ptr binop"); @@ -672,19 +667,21 @@ impl FunctionCx<'a, 'll, 'tcx> { } } - pub fn codegen_scalar_checked_binop(&mut self, - bx: &Builder<'a, 'll, 'tcx>, - op: mir::BinOp, - lhs: &'ll Value, - rhs: &'ll Value, - input_ty: Ty<'tcx>) -> OperandValue<'ll> 
{ + pub fn codegen_scalar_checked_binop( + &mut self, + bx: &mut Bx, + op: mir::BinOp, + lhs: Bx::Value, + rhs: Bx::Value, + input_ty: Ty<'tcx> + ) -> OperandValue { // This case can currently arise only from functions marked // with #[rustc_inherit_overflow_checks] and inlined from // another crate (mostly core::num generic/#[inline] fns), // while the current crate doesn't use overflow checks. - if !bx.cx.check_overflow { + if !bx.cx().check_overflow() { let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty); - return OperandValue::Pair(val, C_bool(bx.cx, false)); + return OperandValue::Pair(val, bx.cx().const_bool(false)); } let (val, of) = match op { @@ -696,19 +693,15 @@ impl FunctionCx<'a, 'll, 'tcx> { mir::BinOp::Mul => OverflowOp::Mul, _ => unreachable!() }; - let intrinsic = get_overflow_intrinsic(oop, bx, input_ty); - let res = bx.call(intrinsic, &[lhs, rhs], None); - - (bx.extract_value(res, 0), - bx.extract_value(res, 1)) + bx.checked_binop(oop, input_ty, lhs, rhs) } mir::BinOp::Shl | mir::BinOp::Shr => { - let lhs_llty = val_ty(lhs); - let rhs_llty = val_ty(rhs); - let invert_mask = common::shift_mask_val(&bx, lhs_llty, rhs_llty, true); + let lhs_llty = bx.cx().val_ty(lhs); + let rhs_llty = bx.cx().val_ty(rhs); + let invert_mask = common::shift_mask_val(bx, lhs_llty, rhs_llty, true); let outer_bits = bx.and(rhs, invert_mask); - let of = bx.icmp(llvm::IntNE, outer_bits, C_null(rhs_llty)); + let of = bx.icmp(IntPredicate::IntNE, outer_bits, bx.cx().const_null(rhs_llty)); let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty); (val, of) @@ -720,7 +713,9 @@ impl FunctionCx<'a, 'll, 'tcx> { OperandValue::Pair(val, of) } +} +impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { pub fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>) -> bool { match *rvalue { mir::Rvalue::Ref(..) | @@ -735,7 +730,7 @@ impl FunctionCx<'a, 'll, 'tcx> { true, mir::Rvalue::Repeat(..) | mir::Rvalue::Aggregate(..) 
=> { - let ty = rvalue.ty(self.mir, self.cx.tcx); + let ty = rvalue.ty(self.mir, self.cx.tcx()); let ty = self.monomorphize(&ty); self.cx.layout_of(ty).is_zst() } @@ -745,85 +740,19 @@ impl FunctionCx<'a, 'll, 'tcx> { } } -#[derive(Copy, Clone)] -enum OverflowOp { - Add, Sub, Mul -} - -fn get_overflow_intrinsic(oop: OverflowOp, bx: &Builder<'_, 'll, '_>, ty: Ty) -> &'ll Value { - use syntax::ast::IntTy::*; - use syntax::ast::UintTy::*; - use rustc::ty::{Int, Uint}; - - let tcx = bx.tcx(); - - let new_sty = match ty.sty { - Int(Isize) => Int(tcx.sess.target.isize_ty), - Uint(Usize) => Uint(tcx.sess.target.usize_ty), - ref t @ Uint(_) | ref t @ Int(_) => t.clone(), - _ => panic!("tried to get overflow intrinsic for op applied to non-int type") - }; - - let name = match oop { - OverflowOp::Add => match new_sty { - Int(I8) => "llvm.sadd.with.overflow.i8", - Int(I16) => "llvm.sadd.with.overflow.i16", - Int(I32) => "llvm.sadd.with.overflow.i32", - Int(I64) => "llvm.sadd.with.overflow.i64", - Int(I128) => "llvm.sadd.with.overflow.i128", - - Uint(U8) => "llvm.uadd.with.overflow.i8", - Uint(U16) => "llvm.uadd.with.overflow.i16", - Uint(U32) => "llvm.uadd.with.overflow.i32", - Uint(U64) => "llvm.uadd.with.overflow.i64", - Uint(U128) => "llvm.uadd.with.overflow.i128", - - _ => unreachable!(), - }, - OverflowOp::Sub => match new_sty { - Int(I8) => "llvm.ssub.with.overflow.i8", - Int(I16) => "llvm.ssub.with.overflow.i16", - Int(I32) => "llvm.ssub.with.overflow.i32", - Int(I64) => "llvm.ssub.with.overflow.i64", - Int(I128) => "llvm.ssub.with.overflow.i128", - - Uint(U8) => "llvm.usub.with.overflow.i8", - Uint(U16) => "llvm.usub.with.overflow.i16", - Uint(U32) => "llvm.usub.with.overflow.i32", - Uint(U64) => "llvm.usub.with.overflow.i64", - Uint(U128) => "llvm.usub.with.overflow.i128", - - _ => unreachable!(), - }, - OverflowOp::Mul => match new_sty { - Int(I8) => "llvm.smul.with.overflow.i8", - Int(I16) => "llvm.smul.with.overflow.i16", - Int(I32) => "llvm.smul.with.overflow.i32", - Int(I64) => "llvm.smul.with.overflow.i64", - Int(I128) => "llvm.smul.with.overflow.i128", - - Uint(U8) => "llvm.umul.with.overflow.i8", - Uint(U16) => "llvm.umul.with.overflow.i16", - Uint(U32) => "llvm.umul.with.overflow.i32", - Uint(U64) => "llvm.umul.with.overflow.i64", - Uint(U128) => "llvm.umul.with.overflow.i128", - - _ => unreachable!(), - }, - }; - - bx.cx.get_intrinsic(&name) -} - -fn cast_int_to_float(bx: &Builder<'_, 'll, '_>, - signed: bool, - x: &'ll Value, - int_ty: &'ll Type, - float_ty: &'ll Type) -> &'ll Value { +fn cast_int_to_float<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( + bx: &mut Bx, + signed: bool, + x: Bx::Value, + int_ty: Bx::Type, + float_ty: Bx::Type +) -> Bx::Value { // Most integer types, even i128, fit into [-f32::MAX, f32::MAX] after rounding. // It's only u128 -> f32 that can cause overflows (i.e., should yield infinity). // LLVM's uitofp produces undef in those cases, so we manually check for that case. - let is_u128_to_f32 = !signed && int_ty.int_width() == 128 && float_ty.float_width() == 32; + let is_u128_to_f32 = !signed && + bx.cx().int_width(int_ty) == 128 && + bx.cx().float_width(float_ty) == 32; if is_u128_to_f32 { // All inputs greater or equal to (f32::MAX + 0.5 ULP) are rounded to infinity, // and for everything else LLVM's uitofp works just fine. 
@@ -831,11 +760,12 @@ fn cast_int_to_float(bx: &Builder<'_, 'll, '_>, use rustc_apfloat::Float; const MAX_F32_PLUS_HALF_ULP: u128 = ((1 << (Single::PRECISION + 1)) - 1) << (Single::MAX_EXP - Single::PRECISION as i16); - let max = C_uint_big(int_ty, MAX_F32_PLUS_HALF_ULP); - let overflow = bx.icmp(llvm::IntUGE, x, max); - let infinity_bits = C_u32(bx.cx, ieee::Single::INFINITY.to_bits() as u32); - let infinity = consts::bitcast(infinity_bits, float_ty); - bx.select(overflow, infinity, bx.uitofp(x, float_ty)) + let max = bx.cx().const_uint_big(int_ty, MAX_F32_PLUS_HALF_ULP); + let overflow = bx.icmp(IntPredicate::IntUGE, x, max); + let infinity_bits = bx.cx().const_u32(ieee::Single::INFINITY.to_bits() as u32); + let infinity = bx.bitcast(infinity_bits, float_ty); + let fp = bx.uitofp(x, float_ty); + bx.select(overflow, infinity, fp) } else { if signed { bx.sitofp(x, float_ty) @@ -845,20 +775,25 @@ fn cast_int_to_float(bx: &Builder<'_, 'll, '_>, } } -fn cast_float_to_int(bx: &Builder<'_, 'll, '_>, - signed: bool, - x: &'ll Value, - float_ty: &'ll Type, - int_ty: &'ll Type) -> &'ll Value { +fn cast_float_to_int<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( + bx: &mut Bx, + signed: bool, + x: Bx::Value, + float_ty: Bx::Type, + int_ty: Bx::Type +) -> Bx::Value { let fptosui_result = if signed { bx.fptosi(x, int_ty) } else { bx.fptoui(x, int_ty) }; - if !bx.sess().opts.debugging_opts.saturating_float_casts { + if !bx.cx().sess().opts.debugging_opts.saturating_float_casts { return fptosui_result; } + + let int_width = bx.cx().int_width(int_ty); + let float_width = bx.cx().float_width(float_ty); // LLVM's fpto[su]i returns undef when the input x is infinite, NaN, or does not fit into the // destination integer type after rounding towards zero. This `undef` value can cause UB in // safe code (see issue #10184), so we implement a saturating conversion on top of it: @@ -878,39 +813,50 @@ fn cast_float_to_int(bx: &Builder<'_, 'll, '_>, // On the other hand, f_max works even if int_ty::MAX is greater than float_ty::MAX. Because // we're rounding towards zero, we just get float_ty::MAX (which is always an integer). // This already happens today with u128::MAX = 2^128 - 1 > f32::MAX. 
- fn compute_clamp_bounds(signed: bool, int_ty: &Type) -> (u128, u128) { - let rounded_min = F::from_i128_r(int_min(signed, int_ty), Round::TowardZero); - assert_eq!(rounded_min.status, Status::OK); - let rounded_max = F::from_u128_r(int_max(signed, int_ty), Round::TowardZero); - assert!(rounded_max.value.is_finite()); - (rounded_min.value.to_bits(), rounded_max.value.to_bits()) - } - fn int_max(signed: bool, int_ty: &Type) -> u128 { - let shift_amount = 128 - int_ty.int_width(); + let int_max = |signed: bool, int_width: u64| -> u128 { + let shift_amount = 128 - int_width; if signed { i128::MAX as u128 >> shift_amount } else { u128::MAX >> shift_amount } - } - fn int_min(signed: bool, int_ty: &Type) -> i128 { + }; + let int_min = |signed: bool, int_width: u64| -> i128 { if signed { - i128::MIN >> (128 - int_ty.int_width()) + i128::MIN >> (128 - int_width) } else { 0 } - } - let float_bits_to_llval = |bits| { - let bits_llval = match float_ty.float_width() { - 32 => C_u32(bx.cx, bits as u32), - 64 => C_u64(bx.cx, bits as u64), + }; + + let compute_clamp_bounds_single = + |signed: bool, int_width: u64| -> (u128, u128) { + let rounded_min = ieee::Single::from_i128_r(int_min(signed, int_width), Round::TowardZero); + assert_eq!(rounded_min.status, Status::OK); + let rounded_max = ieee::Single::from_u128_r(int_max(signed, int_width), Round::TowardZero); + assert!(rounded_max.value.is_finite()); + (rounded_min.value.to_bits(), rounded_max.value.to_bits()) + }; + let compute_clamp_bounds_double = + |signed: bool, int_width: u64| -> (u128, u128) { + let rounded_min = ieee::Double::from_i128_r(int_min(signed, int_width), Round::TowardZero); + assert_eq!(rounded_min.status, Status::OK); + let rounded_max = ieee::Double::from_u128_r(int_max(signed, int_width), Round::TowardZero); + assert!(rounded_max.value.is_finite()); + (rounded_min.value.to_bits(), rounded_max.value.to_bits()) + }; + + let mut float_bits_to_llval = |bits| { + let bits_llval = match float_width { + 32 => bx.cx().const_u32(bits as u32), + 64 => bx.cx().const_u64(bits as u64), n => bug!("unsupported float width {}", n), }; - consts::bitcast(bits_llval, float_ty) + bx.bitcast(bits_llval, float_ty) }; - let (f_min, f_max) = match float_ty.float_width() { - 32 => compute_clamp_bounds::(signed, int_ty), - 64 => compute_clamp_bounds::(signed, int_ty), + let (f_min, f_max) = match float_width { + 32 => compute_clamp_bounds_single(signed, int_width), + 64 => compute_clamp_bounds_double(signed, int_width), n => bug!("unsupported float width {}", n), }; let f_min = float_bits_to_llval(f_min); @@ -956,10 +902,10 @@ fn cast_float_to_int(bx: &Builder<'_, 'll, '_>, // negation, and the negation can be merged into the select. Therefore, it not necessarily any // more expensive than a ordered ("normal") comparison. Whether these optimizations will be // performed is ultimately up to the backend, but at least x86 does perform them. 
- let less_or_nan = bx.fcmp(llvm::RealULT, x, f_min); - let greater = bx.fcmp(llvm::RealOGT, x, f_max); - let int_max = C_uint_big(int_ty, int_max(signed, int_ty)); - let int_min = C_uint_big(int_ty, int_min(signed, int_ty) as u128); + let less_or_nan = bx.fcmp(RealPredicate::RealULT, x, f_min); + let greater = bx.fcmp(RealPredicate::RealOGT, x, f_max); + let int_max = bx.cx().const_uint_big(int_ty, int_max(signed, int_width)); + let int_min = bx.cx().const_uint_big(int_ty, int_min(signed, int_width) as u128); let s0 = bx.select(less_or_nan, int_min, fptosui_result); let s1 = bx.select(greater, int_max, s0); @@ -968,7 +914,9 @@ fn cast_float_to_int(bx: &Builder<'_, 'll, '_>, // Therefore we only need to execute this step for signed integer types. if signed { // LLVM has no isNaN predicate, so we use (x == x) instead - bx.select(bx.fcmp(llvm::RealOEQ, x, x), s1, C_uint(int_ty, 0)) + let zero = bx.cx().const_uint(int_ty, 0); + let cmp = bx.fcmp(RealPredicate::RealOEQ, x, x); + bx.select(cmp, s1, zero) } else { s1 } diff --git a/src/librustc_codegen_llvm/mir/statement.rs b/src/librustc_codegen_ssa/mir/statement.rs similarity index 71% rename from src/librustc_codegen_llvm/mir/statement.rs rename to src/librustc_codegen_ssa/mir/statement.rs index 93be0074f6..568a7e7e16 100644 --- a/src/librustc_codegen_llvm/mir/statement.rs +++ b/src/librustc_codegen_ssa/mir/statement.rs @@ -10,21 +10,21 @@ use rustc::mir; -use asm; -use builder::Builder; - +use traits::BuilderMethods; use super::FunctionCx; use super::LocalRef; use super::OperandValue; +use traits::*; -impl FunctionCx<'a, 'll, 'tcx> { - pub fn codegen_statement(&mut self, - bx: Builder<'a, 'll, 'tcx>, - statement: &mir::Statement<'tcx>) - -> Builder<'a, 'll, 'tcx> { +impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { + pub fn codegen_statement( + &mut self, + mut bx: Bx, + statement: &mir::Statement<'tcx> + ) -> Bx { debug!("codegen_statement(statement={:?})", statement); - self.set_debug_loc(&bx, statement.source_info); + self.set_debug_loc(&mut bx, statement.source_info); match statement.kind { mir::StatementKind::Assign(ref place, ref rvalue) => { if let mir::Place::Local(index) = *place { @@ -53,53 +53,50 @@ impl FunctionCx<'a, 'll, 'tcx> { } } } else { - let cg_dest = self.codegen_place(&bx, place); + let cg_dest = self.codegen_place(&mut bx, place); self.codegen_rvalue(bx, cg_dest, rvalue) } } mir::StatementKind::SetDiscriminant{ref place, variant_index} => { - self.codegen_place(&bx, place) - .codegen_set_discr(&bx, variant_index); + self.codegen_place(&mut bx, place) + .codegen_set_discr(&mut bx, variant_index); bx } mir::StatementKind::StorageLive(local) => { if let LocalRef::Place(cg_place) = self.locals[local] { - cg_place.storage_live(&bx); + cg_place.storage_live(&mut bx); } else if let LocalRef::UnsizedPlace(cg_indirect_place) = self.locals[local] { - cg_indirect_place.storage_live(&bx); + cg_indirect_place.storage_live(&mut bx); } bx } mir::StatementKind::StorageDead(local) => { if let LocalRef::Place(cg_place) = self.locals[local] { - cg_place.storage_dead(&bx); + cg_place.storage_dead(&mut bx); } else if let LocalRef::UnsizedPlace(cg_indirect_place) = self.locals[local] { - cg_indirect_place.storage_dead(&bx); + cg_indirect_place.storage_dead(&mut bx); } bx } mir::StatementKind::InlineAsm { ref asm, ref outputs, ref inputs } => { let outputs = outputs.iter().map(|output| { - self.codegen_place(&bx, output) + self.codegen_place(&mut bx, output) }).collect(); let input_vals = inputs.iter() - 
.try_fold(Vec::with_capacity(inputs.len()), |mut acc, input| { - let op = self.codegen_operand(&bx, input); + .fold(Vec::with_capacity(inputs.len()), |mut acc, (span, input)| { + let op = self.codegen_operand(&mut bx, input); if let OperandValue::Immediate(_) = op.val { acc.push(op.immediate()); - Ok(acc) } else { - Err(op) + span_err!(bx.sess(), span.to_owned(), E0669, + "invalid value for constraint in inline assembly"); } + acc }); - if input_vals.is_err() { - span_err!(bx.sess(), statement.source_info.span, E0669, - "invalid value for constraint in inline assembly"); - } else { - let input_vals = input_vals.unwrap(); - let res = asm::codegen_inline_asm(&bx, asm, outputs, input_vals); + if input_vals.len() == inputs.len() { + let res = bx.codegen_inline_asm(asm, outputs, input_vals); if !res { span_err!(bx.sess(), statement.source_info.span, E0668, "malformed inline assembly"); @@ -108,8 +105,8 @@ impl FunctionCx<'a, 'll, 'tcx> { bx } mir::StatementKind::FakeRead(..) | - mir::StatementKind::EndRegion(_) | - mir::StatementKind::Validate(..) | + mir::StatementKind::Retag { .. } | + mir::StatementKind::EscapeToRaw { .. } | mir::StatementKind::AscribeUserType(..) | mir::StatementKind::Nop => bx, } diff --git a/src/librustc_codegen_ssa/mono_item.rs b/src/librustc_codegen_ssa/mono_item.rs new file mode 100644 index 0000000000..8fe8979196 --- /dev/null +++ b/src/librustc_codegen_ssa/mono_item.rs @@ -0,0 +1,111 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use base; +use rustc::hir; +use rustc::hir::def::Def; +use rustc::mir::mono::{Linkage, Visibility}; +use rustc::ty::layout::HasTyCtxt; +use std::fmt; +use traits::*; + +pub use rustc::mir::mono::MonoItem; + +pub use rustc_mir::monomorphize::item::MonoItemExt as BaseMonoItemExt; + +pub trait MonoItemExt<'a, 'tcx: 'a>: fmt::Debug + BaseMonoItemExt<'a, 'tcx> { + fn define>(&self, cx: &'a Bx::CodegenCx) { + debug!("BEGIN IMPLEMENTING '{} ({})' in cgu {}", + self.to_string(cx.tcx()), + self.to_raw_string(), + cx.codegen_unit().name()); + + match *self.as_mono_item() { + MonoItem::Static(def_id) => { + let tcx = cx.tcx(); + let is_mutable = match tcx.describe_def(def_id) { + Some(Def::Static(_, is_mutable)) => is_mutable, + Some(other) => { + bug!("Expected Def::Static, found {:?}", other) + } + None => { + bug!("Expected Def::Static for {:?}, found nothing", def_id) + } + }; + cx.codegen_static(def_id, is_mutable); + } + MonoItem::GlobalAsm(node_id) => { + let item = cx.tcx().hir.expect_item(node_id); + if let hir::ItemKind::GlobalAsm(ref ga) = item.node { + cx.codegen_global_asm(ga); + } else { + span_bug!(item.span, "Mismatch between hir::Item type and MonoItem type") + } + } + MonoItem::Fn(instance) => { + base::codegen_instance::(&cx, instance); + } + } + + debug!("END IMPLEMENTING '{} ({})' in cgu {}", + self.to_string(cx.tcx()), + self.to_raw_string(), + cx.codegen_unit().name()); + } + + fn predefine>( + &self, + cx: &'a Bx::CodegenCx, + linkage: Linkage, + visibility: Visibility + ) { + debug!("BEGIN PREDEFINING '{} ({})' in cgu {}", + self.to_string(cx.tcx()), + self.to_raw_string(), + cx.codegen_unit().name()); + + let symbol_name = self.symbol_name(cx.tcx()).as_str(); + + debug!("symbol {}", &symbol_name); + + match *self.as_mono_item() { + MonoItem::Static(def_id) => { + cx.predefine_static(def_id, linkage, visibility, &symbol_name); + } + MonoItem::Fn(instance) => { + cx.predefine_fn(instance, linkage, visibility, &symbol_name); + } + MonoItem::GlobalAsm(..) => {} + } + + debug!("END PREDEFINING '{} ({})' in cgu {}", + self.to_string(cx.tcx()), + self.to_raw_string(), + cx.codegen_unit().name()); + } + + fn to_raw_string(&self) -> String { + match *self.as_mono_item() { + MonoItem::Fn(instance) => { + format!("Fn({:?}, {})", + instance.def, + instance.substs.as_ptr() as usize) + } + MonoItem::Static(id) => { + format!("Static({:?})", id) + } + MonoItem::GlobalAsm(id) => { + format!("GlobalAsm({:?})", id) + } + } + } +} + +impl<'a, 'tcx: 'a> MonoItemExt<'a, 'tcx> for MonoItem<'tcx> {} diff --git a/src/librustc_codegen_ssa/traits/abi.rs b/src/librustc_codegen_ssa/traits/abi.rs new file mode 100644 index 0000000000..c659a99e1c --- /dev/null +++ b/src/librustc_codegen_ssa/traits/abi.rs @@ -0,0 +1,23 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+
+use super::BackendTypes;
+use rustc::ty::{FnSig, Instance, Ty};
+use rustc_target::abi::call::FnType;
+
+pub trait AbiMethods<'tcx> {
+    fn new_fn_type(&self, sig: FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> FnType<'tcx, Ty<'tcx>>;
+    fn new_vtable(&self, sig: FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> FnType<'tcx, Ty<'tcx>>;
+    fn fn_type_of_instance(&self, instance: &Instance<'tcx>) -> FnType<'tcx, Ty<'tcx>>;
+}
+
+pub trait AbiBuilderMethods<'tcx>: BackendTypes {
+    fn apply_attrs_callsite(&mut self, ty: &FnType<'tcx, Ty<'tcx>>, callsite: Self::Value);
+}
diff --git a/src/librustc_codegen_ssa/traits/asm.rs b/src/librustc_codegen_ssa/traits/asm.rs
new file mode 100644
index 0000000000..0e56fe46a3
--- /dev/null
+++ b/src/librustc_codegen_ssa/traits/asm.rs
@@ -0,0 +1,27 @@
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use super::BackendTypes;
+use mir::place::PlaceRef;
+use rustc::hir::{GlobalAsm, InlineAsm};
+
+pub trait AsmBuilderMethods<'tcx>: BackendTypes {
+    /// Take an inline assembly expression and splat it out via LLVM
+    fn codegen_inline_asm(
+        &mut self,
+        ia: &InlineAsm,
+        outputs: Vec<PlaceRef<'tcx, Self::Value>>,
+        inputs: Vec<Self::Value>,
+    ) -> bool;
+}
+
+pub trait AsmMethods<'tcx> {
+    fn codegen_global_asm(&self, ga: &GlobalAsm);
+}
diff --git a/src/librustc_codegen_ssa/traits/backend.rs b/src/librustc_codegen_ssa/traits/backend.rs
new file mode 100644
index 0000000000..b59f970ae0
--- /dev/null
+++ b/src/librustc_codegen_ssa/traits/backend.rs
@@ -0,0 +1,66 @@
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use rustc::ty::layout::{HasTyCtxt, LayoutOf, TyLayout};
+use rustc::ty::Ty;
+
+use super::write::WriteBackendMethods;
+use super::CodegenObject;
+use rustc::middle::allocator::AllocatorKind;
+use rustc::middle::cstore::EncodedMetadata;
+use rustc::mir::mono::Stats;
+use rustc::session::Session;
+use rustc::ty::TyCtxt;
+use rustc_codegen_utils::codegen_backend::CodegenBackend;
+use std::sync::Arc;
+use syntax_pos::symbol::InternedString;
+
+pub trait BackendTypes {
+    type Value: CodegenObject;
+    type BasicBlock: Copy;
+    type Type: CodegenObject;
+    type Funclet;
+
+    type DIScope: Copy;
+}
+
+pub trait Backend<'tcx>:
+    Sized + BackendTypes + HasTyCtxt<'tcx> + LayoutOf<Ty = Ty<'tcx>, TyLayout = TyLayout<'tcx>>
+{
+}
+
+impl<'tcx, T> Backend<'tcx> for T where
+    Self: BackendTypes + HasTyCtxt<'tcx> + LayoutOf<Ty = Ty<'tcx>, TyLayout = TyLayout<'tcx>>
+{
+}
+
+pub trait ExtraBackendMethods: CodegenBackend + WriteBackendMethods + Sized + Send {
+    fn new_metadata(&self, sess: &Session, mod_name: &str) -> Self::Module;
+    fn write_metadata<'b, 'gcx>(
+        &self,
+        tcx: TyCtxt<'b, 'gcx, 'gcx>,
+        metadata: &Self::Module,
+    ) -> EncodedMetadata;
+    fn codegen_allocator(&self, tcx: TyCtxt, mods: &Self::Module, kind: AllocatorKind);
+    fn compile_codegen_unit<'a, 'tcx: 'a>(
+        &self,
+        tcx: TyCtxt<'a, 'tcx, 'tcx>,
+        cgu_name: InternedString,
+    ) -> Stats;
+    // If find_features is true this won't access `sess.crate_types` by assuming
+    // that `is_pie_binary` is false. When we discover LLVM target features
+    // `sess.crate_types` is uninitialized so we cannot access it.
+    fn target_machine_factory(
+        &self,
+        sess: &Session,
+        find_features: bool,
+    ) -> Arc<dyn Fn() -> Result<Self::TargetMachine, String> + Send + Sync>;
+    fn target_cpu<'b>(&self, sess: &'b Session) -> &'b str;
+}
diff --git a/src/librustc_codegen_ssa/traits/builder.rs b/src/librustc_codegen_ssa/traits/builder.rs
new file mode 100644
index 0000000000..c1349329c1
--- /dev/null
+++ b/src/librustc_codegen_ssa/traits/builder.rs
@@ -0,0 +1,334 @@
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
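The `BuilderMethods` trait declared in this new file is the core of the refactor: the MIR lowering above (`place.rs`, `rvalue.rs`, `statement.rs`) now emits IR only through these methods and through the `bx.cx()` context accessor, instead of naming LLVM's `Builder`, `Value`, and `C_*` constants directly. A rough sketch of the pattern, not part of the patch and assuming the crate-internal imports the diff itself uses (`traits::*`, `common::IntPredicate`), is:

```rust
use common::IntPredicate; // same crate-internal path rvalue.rs uses above
use traits::*;

// Hypothetical helper, generic over any backend `Bx`: an unsigned add that also
// reports overflow, built only from methods declared on `BuilderMethods`.
fn unsigned_add_with_overflow<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    lhs: Bx::Value,
    rhs: Bx::Value,
) -> (Bx::Value, Bx::Value) {
    let sum = bx.add(lhs, rhs);
    // For an unsigned wrapping add, the result is smaller than an operand
    // exactly when the addition overflowed.
    let overflowed = bx.icmp(IntPredicate::IntULT, sum, lhs);
    (sum, overflowed)
}
```

A concrete backend supplies the `Value` and `Type` associated types and the instruction-building methods; that is why the files renamed into `librustc_codegen_ssa` above could drop their `use llvm;` imports.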
+ +use super::abi::AbiBuilderMethods; +use super::asm::AsmBuilderMethods; +use super::debuginfo::DebugInfoBuilderMethods; +use super::intrinsic::IntrinsicCallMethods; +use super::type_::ArgTypeMethods; +use super::{HasCodegen, StaticBuilderMethods}; +use common::{AtomicOrdering, AtomicRmwBinOp, IntPredicate, RealPredicate, SynchronizationScope}; +use mir::operand::OperandRef; +use mir::place::PlaceRef; +use rustc::ty::Ty; +use rustc::ty::layout::{Align, Size}; +use std::ffi::CStr; +use MemFlags; + +use std::borrow::Cow; +use std::ops::Range; +use syntax::ast::AsmDialect; + +#[derive(Copy, Clone)] +pub enum OverflowOp { + Add, + Sub, + Mul, +} + +pub trait BuilderMethods<'a, 'tcx: 'a>: + HasCodegen<'tcx> + + DebugInfoBuilderMethods<'tcx> + + ArgTypeMethods<'tcx> + + AbiBuilderMethods<'tcx> + + IntrinsicCallMethods<'tcx> + + AsmBuilderMethods<'tcx> + + StaticBuilderMethods<'tcx> +{ + fn new_block<'b>(cx: &'a Self::CodegenCx, llfn: Self::Value, name: &'b str) -> Self; + fn with_cx(cx: &'a Self::CodegenCx) -> Self; + fn build_sibling_block<'b>(&self, name: &'b str) -> Self; + fn cx(&self) -> &Self::CodegenCx; + fn llfn(&self) -> Self::Value; + fn llbb(&self) -> Self::BasicBlock; + fn count_insn(&self, category: &str); + + fn set_value_name(&mut self, value: Self::Value, name: &str); + fn position_at_end(&mut self, llbb: Self::BasicBlock); + fn position_at_start(&mut self, llbb: Self::BasicBlock); + fn ret_void(&mut self); + fn ret(&mut self, v: Self::Value); + fn br(&mut self, dest: Self::BasicBlock); + fn cond_br( + &mut self, + cond: Self::Value, + then_llbb: Self::BasicBlock, + else_llbb: Self::BasicBlock, + ); + fn switch( + &mut self, + v: Self::Value, + else_llbb: Self::BasicBlock, + num_cases: usize, + ) -> Self::Value; + fn invoke( + &mut self, + llfn: Self::Value, + args: &[Self::Value], + then: Self::BasicBlock, + catch: Self::BasicBlock, + funclet: Option<&Self::Funclet>, + ) -> Self::Value; + fn unreachable(&mut self); + fn add(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn fadd(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn fadd_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn sub(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn fsub(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn fsub_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn mul(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn fmul(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn fmul_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn udiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn exactudiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn sdiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn exactsdiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn fdiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn fdiv_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn urem(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn srem(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn frem(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn frem_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn shl(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn lshr(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn ashr(&mut self, lhs: 
Self::Value, rhs: Self::Value) -> Self::Value; + fn and(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn or(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn xor(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn neg(&mut self, v: Self::Value) -> Self::Value; + fn fneg(&mut self, v: Self::Value) -> Self::Value; + fn not(&mut self, v: Self::Value) -> Self::Value; + + fn checked_binop( + &mut self, + oop: OverflowOp, + ty: Ty, + lhs: Self::Value, + rhs: Self::Value, + ) -> (Self::Value, Self::Value); + + fn alloca(&mut self, ty: Self::Type, name: &str, align: Align) -> Self::Value; + fn dynamic_alloca(&mut self, ty: Self::Type, name: &str, align: Align) -> Self::Value; + fn array_alloca( + &mut self, + ty: Self::Type, + len: Self::Value, + name: &str, + align: Align, + ) -> Self::Value; + + fn load(&mut self, ptr: Self::Value, align: Align) -> Self::Value; + fn volatile_load(&mut self, ptr: Self::Value) -> Self::Value; + fn atomic_load(&mut self, ptr: Self::Value, order: AtomicOrdering, size: Size) -> Self::Value; + fn load_operand(&mut self, place: PlaceRef<'tcx, Self::Value>) + -> OperandRef<'tcx, Self::Value>; + + fn range_metadata(&mut self, load: Self::Value, range: Range); + fn nonnull_metadata(&mut self, load: Self::Value); + + fn store(&mut self, val: Self::Value, ptr: Self::Value, align: Align) -> Self::Value; + fn store_with_flags( + &mut self, + val: Self::Value, + ptr: Self::Value, + align: Align, + flags: MemFlags, + ) -> Self::Value; + fn atomic_store( + &mut self, + val: Self::Value, + ptr: Self::Value, + order: AtomicOrdering, + size: Size, + ); + + fn gep(&mut self, ptr: Self::Value, indices: &[Self::Value]) -> Self::Value; + fn inbounds_gep(&mut self, ptr: Self::Value, indices: &[Self::Value]) -> Self::Value; + fn struct_gep(&mut self, ptr: Self::Value, idx: u64) -> Self::Value; + + fn trunc(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; + fn sext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; + fn fptoui(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; + fn fptosi(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; + fn uitofp(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; + fn sitofp(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; + fn fptrunc(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; + fn fpext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; + fn ptrtoint(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; + fn inttoptr(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; + fn bitcast(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; + fn intcast(&mut self, val: Self::Value, dest_ty: Self::Type, is_signed: bool) -> Self::Value; + fn pointercast(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; + + fn icmp(&mut self, op: IntPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn fcmp(&mut self, op: RealPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + + fn empty_phi(&mut self, ty: Self::Type) -> Self::Value; + fn phi( + &mut self, + ty: Self::Type, + vals: &[Self::Value], + bbs: &[Self::BasicBlock], + ) -> Self::Value; + fn inline_asm_call( + &mut self, + asm: &CStr, + cons: &CStr, + inputs: &[Self::Value], + output: Self::Type, + volatile: bool, + alignstack: bool, + dia: AsmDialect, + ) -> Option; + + fn memcpy( + &mut self, + dst: Self::Value, + dst_align: Align, + src: Self::Value, + 
src_align: Align, + size: Self::Value, + flags: MemFlags, + ); + fn memmove( + &mut self, + dst: Self::Value, + dst_align: Align, + src: Self::Value, + src_align: Align, + size: Self::Value, + flags: MemFlags, + ); + fn memset( + &mut self, + ptr: Self::Value, + fill_byte: Self::Value, + size: Self::Value, + align: Align, + flags: MemFlags, + ); + + fn minnum(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn maxnum(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn select( + &mut self, + cond: Self::Value, + then_val: Self::Value, + else_val: Self::Value, + ) -> Self::Value; + + fn va_arg(&mut self, list: Self::Value, ty: Self::Type) -> Self::Value; + fn extract_element(&mut self, vec: Self::Value, idx: Self::Value) -> Self::Value; + fn insert_element( + &mut self, + vec: Self::Value, + elt: Self::Value, + idx: Self::Value, + ) -> Self::Value; + fn shuffle_vector( + &mut self, + v1: Self::Value, + v2: Self::Value, + mask: Self::Value, + ) -> Self::Value; + fn vector_splat(&mut self, num_elts: usize, elt: Self::Value) -> Self::Value; + fn vector_reduce_fadd_fast(&mut self, acc: Self::Value, src: Self::Value) -> Self::Value; + fn vector_reduce_fmul_fast(&mut self, acc: Self::Value, src: Self::Value) -> Self::Value; + fn vector_reduce_add(&mut self, src: Self::Value) -> Self::Value; + fn vector_reduce_mul(&mut self, src: Self::Value) -> Self::Value; + fn vector_reduce_and(&mut self, src: Self::Value) -> Self::Value; + fn vector_reduce_or(&mut self, src: Self::Value) -> Self::Value; + fn vector_reduce_xor(&mut self, src: Self::Value) -> Self::Value; + fn vector_reduce_fmin(&mut self, src: Self::Value) -> Self::Value; + fn vector_reduce_fmax(&mut self, src: Self::Value) -> Self::Value; + fn vector_reduce_fmin_fast(&mut self, src: Self::Value) -> Self::Value; + fn vector_reduce_fmax_fast(&mut self, src: Self::Value) -> Self::Value; + fn vector_reduce_min(&mut self, src: Self::Value, is_signed: bool) -> Self::Value; + fn vector_reduce_max(&mut self, src: Self::Value, is_signed: bool) -> Self::Value; + fn extract_value(&mut self, agg_val: Self::Value, idx: u64) -> Self::Value; + fn insert_value(&mut self, agg_val: Self::Value, elt: Self::Value, idx: u64) -> Self::Value; + + fn landing_pad( + &mut self, + ty: Self::Type, + pers_fn: Self::Value, + num_clauses: usize, + ) -> Self::Value; + fn add_clause(&mut self, landing_pad: Self::Value, clause: Self::Value); + fn set_cleanup(&mut self, landing_pad: Self::Value); + fn resume(&mut self, exn: Self::Value) -> Self::Value; + fn cleanup_pad(&mut self, parent: Option, args: &[Self::Value]) -> Self::Funclet; + fn cleanup_ret( + &mut self, + funclet: &Self::Funclet, + unwind: Option, + ) -> Self::Value; + fn catch_pad(&mut self, parent: Self::Value, args: &[Self::Value]) -> Self::Funclet; + fn catch_ret(&mut self, funclet: &Self::Funclet, unwind: Self::BasicBlock) -> Self::Value; + fn catch_switch( + &mut self, + parent: Option, + unwind: Option, + num_handlers: usize, + ) -> Self::Value; + fn add_handler(&mut self, catch_switch: Self::Value, handler: Self::BasicBlock); + fn set_personality_fn(&mut self, personality: Self::Value); + + fn atomic_cmpxchg( + &mut self, + dst: Self::Value, + cmp: Self::Value, + src: Self::Value, + order: AtomicOrdering, + failure_order: AtomicOrdering, + weak: bool, + ) -> Self::Value; + fn atomic_rmw( + &mut self, + op: AtomicRmwBinOp, + dst: Self::Value, + src: Self::Value, + order: AtomicOrdering, + ) -> Self::Value; + fn atomic_fence(&mut self, order: AtomicOrdering, scope: 
SynchronizationScope); + fn add_case(&mut self, s: Self::Value, on_val: Self::Value, dest: Self::BasicBlock); + fn add_incoming_to_phi(&mut self, phi: Self::Value, val: Self::Value, bb: Self::BasicBlock); + fn set_invariant_load(&mut self, load: Self::Value); + + /// Returns the ptr value that should be used for storing `val`. + fn check_store(&mut self, val: Self::Value, ptr: Self::Value) -> Self::Value; + + /// Returns the args that should be used for a call to `llfn`. + fn check_call<'b>( + &mut self, + typ: &str, + llfn: Self::Value, + args: &'b [Self::Value], + ) -> Cow<'b, [Self::Value]> + where + [Self::Value]: ToOwned; + + /// Called for `StorageLive` + fn lifetime_start(&mut self, ptr: Self::Value, size: Size); + + /// Called for `StorageDead` + fn lifetime_end(&mut self, ptr: Self::Value, size: Size); + + fn call( + &mut self, + llfn: Self::Value, + args: &[Self::Value], + funclet: Option<&Self::Funclet>, + ) -> Self::Value; + fn zext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; + + unsafe fn delete_basic_block(&mut self, bb: Self::BasicBlock); + fn do_not_inline(&mut self, llret: Self::Value); +} diff --git a/src/librustc_codegen_ssa/traits/consts.rs b/src/librustc_codegen_ssa/traits/consts.rs new file mode 100644 index 0000000000..af49410794 --- /dev/null +++ b/src/librustc_codegen_ssa/traits/consts.rs @@ -0,0 +1,65 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use super::BackendTypes; +use mir::place::PlaceRef; +use rustc::mir::interpret::Allocation; +use rustc::mir::interpret::Scalar; +use rustc::ty::layout; +use syntax::symbol::LocalInternedString; + +pub trait ConstMethods<'tcx>: BackendTypes { + // Constant constructors + fn const_null(&self, t: Self::Type) -> Self::Value; + fn const_undef(&self, t: Self::Type) -> Self::Value; + fn const_int(&self, t: Self::Type, i: i64) -> Self::Value; + fn const_uint(&self, t: Self::Type, i: u64) -> Self::Value; + fn const_uint_big(&self, t: Self::Type, u: u128) -> Self::Value; + fn const_bool(&self, val: bool) -> Self::Value; + fn const_i32(&self, i: i32) -> Self::Value; + fn const_u32(&self, i: u32) -> Self::Value; + fn const_u64(&self, i: u64) -> Self::Value; + fn const_usize(&self, i: u64) -> Self::Value; + fn const_u8(&self, i: u8) -> Self::Value; + + // This is a 'c-like' raw string, which differs from + // our boxed-and-length-annotated strings. 
+ fn const_cstr(&self, s: LocalInternedString, null_terminated: bool) -> Self::Value; + + fn const_str_slice(&self, s: LocalInternedString) -> Self::Value; + fn const_fat_ptr(&self, ptr: Self::Value, meta: Self::Value) -> Self::Value; + fn const_struct(&self, elts: &[Self::Value], packed: bool) -> Self::Value; + fn const_array(&self, ty: Self::Type, elts: &[Self::Value]) -> Self::Value; + fn const_vector(&self, elts: &[Self::Value]) -> Self::Value; + fn const_bytes(&self, bytes: &[u8]) -> Self::Value; + + fn const_get_elt(&self, v: Self::Value, idx: u64) -> Self::Value; + fn const_get_real(&self, v: Self::Value) -> Option<(f64, bool)>; + fn const_to_uint(&self, v: Self::Value) -> u64; + fn const_to_opt_u128(&self, v: Self::Value, sign_ext: bool) -> Option; + + fn is_const_integral(&self, v: Self::Value) -> bool; + fn is_const_real(&self, v: Self::Value) -> bool; + + fn scalar_to_backend( + &self, + cv: Scalar, + layout: &layout::Scalar, + llty: Self::Type, + ) -> Self::Value; + fn from_const_alloc( + &self, + layout: layout::TyLayout<'tcx>, + alloc: &Allocation, + offset: layout::Size, + ) -> PlaceRef<'tcx, Self::Value>; + + fn const_ptrcast(&self, val: Self::Value, ty: Self::Type) -> Self::Value; +} diff --git a/src/librustc_codegen_ssa/traits/debuginfo.rs b/src/librustc_codegen_ssa/traits/debuginfo.rs new file mode 100644 index 0000000000..c4becf3705 --- /dev/null +++ b/src/librustc_codegen_ssa/traits/debuginfo.rs @@ -0,0 +1,71 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use super::BackendTypes; +use debuginfo::{FunctionDebugContext, MirDebugScope, VariableAccess, VariableKind}; +use rustc::hir::def_id::CrateNum; +use rustc::mir; +use rustc::ty::{self, Ty}; +use rustc_data_structures::indexed_vec::IndexVec; +use rustc_mir::monomorphize::Instance; +use syntax::ast::Name; +use syntax_pos::{SourceFile, Span}; + +pub trait DebugInfoMethods<'tcx>: BackendTypes { + fn create_vtable_metadata(&self, ty: Ty<'tcx>, vtable: Self::Value); + + /// Creates the function-specific debug context. + /// + /// Returns the FunctionDebugContext for the function which holds state needed + /// for debug info creation. The function may also return another variant of the + /// FunctionDebugContext enum which indicates why no debuginfo should be created + /// for the function. 
+ fn create_function_debug_context( + &self, + instance: Instance<'tcx>, + sig: ty::FnSig<'tcx>, + llfn: Self::Value, + mir: &mir::Mir, + ) -> FunctionDebugContext; + + fn create_mir_scopes( + &self, + mir: &mir::Mir, + debug_context: &FunctionDebugContext, + ) -> IndexVec>; + fn extend_scope_to_file( + &self, + scope_metadata: Self::DIScope, + file: &SourceFile, + defining_crate: CrateNum, + ) -> Self::DIScope; + fn debuginfo_finalize(&self); + fn debuginfo_upvar_decls_ops_sequence(&self, byte_offset_of_var_in_env: u64) -> [i64; 4]; +} + +pub trait DebugInfoBuilderMethods<'tcx>: BackendTypes { + fn declare_local( + &mut self, + dbg_context: &FunctionDebugContext, + variable_name: Name, + variable_type: Ty<'tcx>, + scope_metadata: Self::DIScope, + variable_access: VariableAccess<'_, Self::Value>, + variable_kind: VariableKind, + span: Span, + ); + fn set_source_location( + &mut self, + debug_context: &FunctionDebugContext, + scope: Option, + span: Span, + ); + fn insert_reference_to_gdb_debug_scripts_section_global(&mut self); +} diff --git a/src/librustc_codegen_ssa/traits/declare.rs b/src/librustc_codegen_ssa/traits/declare.rs new file mode 100644 index 0000000000..f9a2965284 --- /dev/null +++ b/src/librustc_codegen_ssa/traits/declare.rs @@ -0,0 +1,89 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use super::BackendTypes; +use rustc::hir::def_id::DefId; +use rustc::mir::mono::{Linkage, Visibility}; +use rustc::ty; +use rustc_mir::monomorphize::Instance; + +pub trait DeclareMethods<'tcx>: BackendTypes { + /// Declare a global value. + /// + /// If there’s a value with the same name already declared, the function will + /// return its Value instead. + fn declare_global(&self, name: &str, ty: Self::Type) -> Self::Value; + + /// Declare a C ABI function. + /// + /// Only use this for foreign function ABIs and glue. For Rust functions use + /// `declare_fn` instead. + /// + /// If there’s a value with the same name already declared, the function will + /// update the declaration and return existing Value instead. + fn declare_cfn(&self, name: &str, fn_type: Self::Type) -> Self::Value; + + /// Declare a Rust function. + /// + /// If there’s a value with the same name already declared, the function will + /// update the declaration and return existing Value instead. + fn declare_fn(&self, name: &str, sig: ty::PolyFnSig<'tcx>) -> Self::Value; + + /// Declare a global with an intention to define it. + /// + /// Use this function when you intend to define a global. This function will + /// return None if the name already has a definition associated with it. In that + /// case an error should be reported to the user, because it usually happens due + /// to user’s fault (e.g. misuse of #[no_mangle] or #[export_name] attributes). + fn define_global(&self, name: &str, ty: Self::Type) -> Option; + + /// Declare a private global + /// + /// Use this function when you intend to define a global without a name. + fn define_private_global(&self, ty: Self::Type) -> Self::Value; + + /// Declare a Rust function with an intention to define it. + /// + /// Use this function when you intend to define a function. 
This function will + /// return panic if the name already has a definition associated with it. This + /// can happen with #[no_mangle] or #[export_name], for example. + fn define_fn(&self, name: &str, fn_sig: ty::PolyFnSig<'tcx>) -> Self::Value; + + /// Declare a Rust function with an intention to define it. + /// + /// Use this function when you intend to define a function. This function will + /// return panic if the name already has a definition associated with it. This + /// can happen with #[no_mangle] or #[export_name], for example. + fn define_internal_fn(&self, name: &str, fn_sig: ty::PolyFnSig<'tcx>) -> Self::Value; + + /// Get declared value by name. + fn get_declared_value(&self, name: &str) -> Option; + + /// Get defined or externally defined (AvailableExternally linkage) value by + /// name. + fn get_defined_value(&self, name: &str) -> Option; +} + +pub trait PreDefineMethods<'tcx>: BackendTypes { + fn predefine_static( + &self, + def_id: DefId, + linkage: Linkage, + visibility: Visibility, + symbol_name: &str, + ); + fn predefine_fn( + &self, + instance: Instance<'tcx>, + linkage: Linkage, + visibility: Visibility, + symbol_name: &str, + ); +} diff --git a/src/librustc_codegen_ssa/traits/intrinsic.rs b/src/librustc_codegen_ssa/traits/intrinsic.rs new file mode 100644 index 0000000000..abc118e770 --- /dev/null +++ b/src/librustc_codegen_ssa/traits/intrinsic.rs @@ -0,0 +1,33 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use super::BackendTypes; +use mir::operand::OperandRef; +use rustc::ty::Ty; +use rustc_target::abi::call::FnType; +use syntax_pos::Span; + +pub trait IntrinsicCallMethods<'tcx>: BackendTypes { + /// Remember to add all intrinsics here, in librustc_typeck/check/mod.rs, + /// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics, + /// add them to librustc_codegen_llvm/context.rs + fn codegen_intrinsic_call( + &mut self, + callee_ty: Ty<'tcx>, + fn_ty: &FnType<'tcx, Ty<'tcx>>, + args: &[OperandRef<'tcx, Self::Value>], + llresult: Self::Value, + span: Span, + ); + + fn abort(&mut self); + fn assume(&mut self, val: Self::Value); + fn expect(&mut self, cond: Self::Value, expected: bool) -> Self::Value; +} diff --git a/src/librustc_codegen_ssa/traits/misc.rs b/src/librustc_codegen_ssa/traits/misc.rs new file mode 100644 index 0000000000..d8871dd3a5 --- /dev/null +++ b/src/librustc_codegen_ssa/traits/misc.rs @@ -0,0 +1,40 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
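// Editor's sketch (not part of the patch): the difference between the "declare" and
// "define" families in `DeclareMethods` above. Redeclaring is always allowed and hands
// back the existing value, while `define_global` refuses a name clash so the caller can
// report it as a user error. The helper below is hypothetical.
fn declare_or_reuse_global<'tcx, Cx: DeclareMethods<'tcx>>(
    cx: &Cx,
    name: &str,
    ty: Cx::Type,
) -> Cx::Value {
    match cx.get_declared_value(name) {
        Some(existing) => existing,           // reuse a previous declaration
        None => cx.declare_global(name, ty),  // otherwise create one
    }
}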
+ +use super::BackendTypes; +use libc::c_uint; +use rustc::mir::mono::Stats; +use rustc::session::Session; +use rustc::ty::{self, Instance, Ty}; +use rustc::util::nodemap::FxHashMap; +use rustc_mir::monomorphize::partitioning::CodegenUnit; +use std::cell::RefCell; +use std::sync::Arc; + +pub trait MiscMethods<'tcx>: BackendTypes { + fn vtables( + &self, + ) -> &RefCell, ty::PolyExistentialTraitRef<'tcx>), Self::Value>>; + fn check_overflow(&self) -> bool; + fn instances(&self) -> &RefCell, Self::Value>>; + fn get_fn(&self, instance: Instance<'tcx>) -> Self::Value; + fn get_param(&self, llfn: Self::Value, index: c_uint) -> Self::Value; + fn eh_personality(&self) -> Self::Value; + fn eh_unwind_resume(&self) -> Self::Value; + fn sess(&self) -> &Session; + fn stats(&self) -> &RefCell; + fn consume_stats(self) -> RefCell; + fn codegen_unit(&self) -> &Arc>; + fn closure_env_needs_indirect_debuginfo(&self) -> bool; + fn used_statics(&self) -> &RefCell>; + fn set_frame_pointer_elimination(&self, llfn: Self::Value); + fn apply_target_cpu_attr(&self, llfn: Self::Value); + fn create_used_variable(&self); +} diff --git a/src/librustc_codegen_ssa/traits/mod.rs b/src/librustc_codegen_ssa/traits/mod.rs new file mode 100644 index 0000000000..6251fc3d3f --- /dev/null +++ b/src/librustc_codegen_ssa/traits/mod.rs @@ -0,0 +1,99 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Interface of a Rust codegen backend +//! +//! This crate defines all the traits that have to be implemented by a codegen backend in order to +//! use the backend-agnostic codegen code in `rustc_codegen_ssa`. +//! +//! The interface is designed around two backend-specific data structures, the codegen context and +//! the builder. The codegen context is supposed to be read-only after its creation and during the +//! actual codegen, while the builder stores the information about the function during codegen and +//! is used to produce the instructions of the backend IR. +//! +//! Finaly, a third `Backend` structure has to implement methods related to how codegen information +//! is passed to the backend, especially for asynchronous compilation. +//! +//! The traits contain associated types that are backend-specific, such as the backend's value or +//! basic blocks. 
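// Editor's sketch (not part of the patch): what "backend-agnostic codegen" looks like in
// practice. Code in rustc_codegen_ssa is written against these traits rather than against
// LLVM types, so the same helper works for any backend. The function below is
// hypothetical and only uses methods declared by traits re-exported from this module.
fn scale_and_offset<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    val: Bx::Value,
    factor: u64,
    offset: u64,
) -> Bx::Value {
    // Constants and types come from the read-only codegen context...
    let usize_ty = bx.cx().type_isize();
    let factor = bx.cx().const_uint(usize_ty, factor);
    let offset = bx.cx().const_uint(usize_ty, offset);
    // ...while instructions are emitted through the mutable builder.
    let scaled = bx.mul(val, factor);
    bx.add(scaled, offset)
}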
+ +mod abi; +mod asm; +mod backend; +mod builder; +mod consts; +mod debuginfo; +mod declare; +mod intrinsic; +mod misc; +mod statics; +mod type_; +mod write; + +pub use self::abi::{AbiBuilderMethods, AbiMethods}; +pub use self::asm::{AsmBuilderMethods, AsmMethods}; +pub use self::backend::{Backend, BackendTypes, ExtraBackendMethods}; +pub use self::builder::{BuilderMethods, OverflowOp}; +pub use self::consts::ConstMethods; +pub use self::debuginfo::{DebugInfoBuilderMethods, DebugInfoMethods}; +pub use self::declare::{DeclareMethods, PreDefineMethods}; +pub use self::intrinsic::IntrinsicCallMethods; +pub use self::misc::MiscMethods; +pub use self::statics::{StaticMethods, StaticBuilderMethods}; +pub use self::type_::{ + ArgTypeMethods, BaseTypeMethods, DerivedTypeMethods, LayoutTypeMethods, TypeMethods, +}; +pub use self::write::{ModuleBufferMethods, ThinBufferMethods, WriteBackendMethods}; + +use std::fmt; + +pub trait CodegenObject: Copy + PartialEq + fmt::Debug {} +impl CodegenObject for T {} + +pub trait CodegenMethods<'tcx>: + Backend<'tcx> + + TypeMethods<'tcx> + + MiscMethods<'tcx> + + ConstMethods<'tcx> + + StaticMethods + + DebugInfoMethods<'tcx> + + AbiMethods<'tcx> + + DeclareMethods<'tcx> + + AsmMethods<'tcx> + + PreDefineMethods<'tcx> +{ +} + +impl<'tcx, T> CodegenMethods<'tcx> for T where + Self: Backend<'tcx> + + TypeMethods<'tcx> + + MiscMethods<'tcx> + + ConstMethods<'tcx> + + StaticMethods + + DebugInfoMethods<'tcx> + + AbiMethods<'tcx> + + DeclareMethods<'tcx> + + AsmMethods<'tcx> + + PreDefineMethods<'tcx> +{ +} + +pub trait HasCodegen<'tcx>: + Backend<'tcx> + ::std::ops::Deref>::CodegenCx> +{ + type CodegenCx: CodegenMethods<'tcx> + + BackendTypes< + Value = Self::Value, + BasicBlock = Self::BasicBlock, + Type = Self::Type, + Funclet = Self::Funclet, + DIScope = Self::DIScope, + >; +} diff --git a/src/librustc_codegen_ssa/traits/statics.rs b/src/librustc_codegen_ssa/traits/statics.rs new file mode 100644 index 0000000000..0e665fc29f --- /dev/null +++ b/src/librustc_codegen_ssa/traits/statics.rs @@ -0,0 +1,22 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use super::BackendTypes; +use rustc::hir::def_id::DefId; +use rustc::ty::layout::Align; + +pub trait StaticMethods: BackendTypes { + fn static_addr_of(&self, cv: Self::Value, align: Align, kind: Option<&str>) -> Self::Value; + fn codegen_static(&self, def_id: DefId, is_mutable: bool); +} + +pub trait StaticBuilderMethods<'tcx>: BackendTypes { + fn get_static(&self, def_id: DefId) -> Self::Value; +} diff --git a/src/librustc_codegen_ssa/traits/type_.rs b/src/librustc_codegen_ssa/traits/type_.rs new file mode 100644 index 0000000000..1d31bdfa9f --- /dev/null +++ b/src/librustc_codegen_ssa/traits/type_.rs @@ -0,0 +1,204 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
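// Editor's note (illustrative, not part of the patch): `HasCodegen` also requires
// `Deref<Target = Self::CodegenCx>`, so builder code can reach context-level methods
// without spelling out `.cx()` every time. Hypothetical example, assuming the usual
// `traits::*` glob import; `type_i8p` is a provided method added in type_.rs below.
fn null_byte_ptr<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(bx: &mut Bx) -> Bx::Value {
    let i8p = bx.type_i8p();     // resolves on Bx::CodegenCx through the Deref bound
    bx.cx().const_null(i8p)      // the equivalent explicit form via the context
}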
+ +use super::misc::MiscMethods; +use super::Backend; +use super::HasCodegen; +use common::{self, TypeKind}; +use mir::place::PlaceRef; +use rustc::ty::layout::{self, Align, Size, TyLayout}; +use rustc::ty::{self, Ty}; +use rustc::util::nodemap::FxHashMap; +use rustc_target::abi::call::{ArgType, CastTarget, FnType, Reg}; +use std::cell::RefCell; +use syntax::ast; + +// This depends on `Backend` and not `BackendTypes`, because consumers will probably want to use +// `LayoutOf` or `HasTyCtxt`. This way, they don't have to add a constraint on it themselves. +pub trait BaseTypeMethods<'tcx>: Backend<'tcx> { + fn type_void(&self) -> Self::Type; + fn type_metadata(&self) -> Self::Type; + fn type_i1(&self) -> Self::Type; + fn type_i8(&self) -> Self::Type; + fn type_i16(&self) -> Self::Type; + fn type_i32(&self) -> Self::Type; + fn type_i64(&self) -> Self::Type; + fn type_i128(&self) -> Self::Type; + + // Creates an integer type with the given number of bits, e.g. i24 + fn type_ix(&self, num_bits: u64) -> Self::Type; + fn type_isize(&self) -> Self::Type; + + fn type_f32(&self) -> Self::Type; + fn type_f64(&self) -> Self::Type; + fn type_x86_mmx(&self) -> Self::Type; + + fn type_func(&self, args: &[Self::Type], ret: Self::Type) -> Self::Type; + fn type_variadic_func(&self, args: &[Self::Type], ret: Self::Type) -> Self::Type; + fn type_struct(&self, els: &[Self::Type], packed: bool) -> Self::Type; + fn type_array(&self, ty: Self::Type, len: u64) -> Self::Type; + fn type_vector(&self, ty: Self::Type, len: u64) -> Self::Type; + fn type_kind(&self, ty: Self::Type) -> TypeKind; + fn type_ptr_to(&self, ty: Self::Type) -> Self::Type; + fn element_type(&self, ty: Self::Type) -> Self::Type; + + /// Return the number of elements in `self` if it is a LLVM vector type. + fn vector_length(&self, ty: Self::Type) -> usize; + + fn func_params_types(&self, ty: Self::Type) -> Vec; + fn float_width(&self, ty: Self::Type) -> usize; + + /// Retrieve the bit width of the integer type `self`. + fn int_width(&self, ty: Self::Type) -> u64; + + fn val_ty(&self, v: Self::Value) -> Self::Type; + fn scalar_lltypes(&self) -> &RefCell, Self::Type>>; +} + +pub trait DerivedTypeMethods<'tcx>: BaseTypeMethods<'tcx> + MiscMethods<'tcx> { + fn type_bool(&self) -> Self::Type { + self.type_i8() + } + + fn type_i8p(&self) -> Self::Type { + self.type_ptr_to(self.type_i8()) + } + + fn type_int(&self) -> Self::Type { + match &self.sess().target.target.target_c_int_width[..] 
{ + "16" => self.type_i16(), + "32" => self.type_i32(), + "64" => self.type_i64(), + width => bug!("Unsupported target_c_int_width: {}", width), + } + } + + fn type_int_from_ty(&self, t: ast::IntTy) -> Self::Type { + match t { + ast::IntTy::Isize => self.type_isize(), + ast::IntTy::I8 => self.type_i8(), + ast::IntTy::I16 => self.type_i16(), + ast::IntTy::I32 => self.type_i32(), + ast::IntTy::I64 => self.type_i64(), + ast::IntTy::I128 => self.type_i128(), + } + } + + fn type_uint_from_ty(&self, t: ast::UintTy) -> Self::Type { + match t { + ast::UintTy::Usize => self.type_isize(), + ast::UintTy::U8 => self.type_i8(), + ast::UintTy::U16 => self.type_i16(), + ast::UintTy::U32 => self.type_i32(), + ast::UintTy::U64 => self.type_i64(), + ast::UintTy::U128 => self.type_i128(), + } + } + + fn type_float_from_ty(&self, t: ast::FloatTy) -> Self::Type { + match t { + ast::FloatTy::F32 => self.type_f32(), + ast::FloatTy::F64 => self.type_f64(), + } + } + + fn type_from_integer(&self, i: layout::Integer) -> Self::Type { + use rustc::ty::layout::Integer::*; + match i { + I8 => self.type_i8(), + I16 => self.type_i16(), + I32 => self.type_i32(), + I64 => self.type_i64(), + I128 => self.type_i128(), + } + } + + fn type_pointee_for_align(&self, align: Align) -> Self::Type { + // FIXME(eddyb) We could find a better approximation if ity.align < align. + let ity = layout::Integer::approximate_align(self, align); + self.type_from_integer(ity) + } + + /// Return a LLVM type that has at most the required alignment, + /// and exactly the required size, as a best-effort padding array. + fn type_padding_filler(&self, size: Size, align: Align) -> Self::Type { + let unit = layout::Integer::approximate_align(self, align); + let size = size.bytes(); + let unit_size = unit.size().bytes(); + assert_eq!(size % unit_size, 0); + self.type_array(self.type_from_integer(unit), size / unit_size) + } + + fn type_needs_drop(&self, ty: Ty<'tcx>) -> bool { + common::type_needs_drop(self.tcx(), ty) + } + + fn type_is_sized(&self, ty: Ty<'tcx>) -> bool { + common::type_is_sized(self.tcx(), ty) + } + + fn type_is_freeze(&self, ty: Ty<'tcx>) -> bool { + common::type_is_freeze(self.tcx(), ty) + } + + fn type_has_metadata(&self, ty: Ty<'tcx>) -> bool { + use syntax_pos::DUMMY_SP; + if ty.is_sized(self.tcx().at(DUMMY_SP), ty::ParamEnv::reveal_all()) { + return false; + } + + let tail = self.tcx().struct_tail(ty); + match tail.sty { + ty::Foreign(..) => false, + ty::Str | ty::Slice(..) | ty::Dynamic(..) 
=> true, + _ => bug!("unexpected unsized tail: {:?}", tail.sty), + } + } +} + +impl DerivedTypeMethods<'tcx> for T where Self: BaseTypeMethods<'tcx> + MiscMethods<'tcx> {} + +pub trait LayoutTypeMethods<'tcx>: Backend<'tcx> { + fn backend_type(&self, layout: TyLayout<'tcx>) -> Self::Type; + fn cast_backend_type(&self, ty: &CastTarget) -> Self::Type; + fn fn_backend_type(&self, ty: &FnType<'tcx, Ty<'tcx>>) -> Self::Type; + fn fn_ptr_backend_type(&self, ty: &FnType<'tcx, Ty<'tcx>>) -> Self::Type; + fn reg_backend_type(&self, ty: &Reg) -> Self::Type; + fn immediate_backend_type(&self, layout: TyLayout<'tcx>) -> Self::Type; + fn is_backend_immediate(&self, layout: TyLayout<'tcx>) -> bool; + fn is_backend_scalar_pair(&self, layout: TyLayout<'tcx>) -> bool; + fn backend_field_index(&self, layout: TyLayout<'tcx>, index: usize) -> u64; + fn scalar_pair_element_backend_type<'a>( + &self, + layout: TyLayout<'tcx>, + index: usize, + immediate: bool, + ) -> Self::Type; +} + +pub trait ArgTypeMethods<'tcx>: HasCodegen<'tcx> { + fn store_fn_arg( + &mut self, + ty: &ArgType<'tcx, Ty<'tcx>>, + idx: &mut usize, + dst: PlaceRef<'tcx, Self::Value>, + ); + fn store_arg_ty( + &mut self, + ty: &ArgType<'tcx, Ty<'tcx>>, + val: Self::Value, + dst: PlaceRef<'tcx, Self::Value>, + ); + fn memory_ty(&self, ty: &ArgType<'tcx, Ty<'tcx>>) -> Self::Type; +} + +pub trait TypeMethods<'tcx>: DerivedTypeMethods<'tcx> + LayoutTypeMethods<'tcx> {} + +impl TypeMethods<'tcx> for T where Self: DerivedTypeMethods<'tcx> + LayoutTypeMethods<'tcx> {} diff --git a/src/librustc_codegen_ssa/traits/write.rs b/src/librustc_codegen_ssa/traits/write.rs new file mode 100644 index 0000000000..72522e19af --- /dev/null +++ b/src/librustc_codegen_ssa/traits/write.rs @@ -0,0 +1,72 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use back::lto::{LtoModuleCodegen, SerializedModule, ThinModule}; +use back::write::{CodegenContext, ModuleConfig}; +use {CompiledModule, ModuleCodegen}; + +use rustc::dep_graph::WorkProduct; +use rustc::util::time_graph::Timeline; +use rustc_errors::{FatalError, Handler}; + +pub trait WriteBackendMethods: 'static + Sized + Clone { + type Module: Send + Sync; + type TargetMachine; + type ModuleBuffer: ModuleBufferMethods; + type Context: ?Sized; + type ThinData: Send + Sync; + type ThinBuffer: ThinBufferMethods; + + /// Performs LTO, which in the case of full LTO means merging all modules into + /// a single one and returning it for further optimizing. For ThinLTO, it will + /// do the global analysis necessary and return two lists, one of the modules + /// the need optimization and another for modules that can simply be copied over + /// from the incr. comp. cache. 
+ fn run_lto( + cgcx: &CodegenContext, + modules: Vec>, + cached_modules: Vec<(SerializedModule, WorkProduct)>, + timeline: &mut Timeline, + ) -> Result<(Vec>, Vec), FatalError>; + fn print_pass_timings(&self); + unsafe fn optimize( + cgcx: &CodegenContext, + diag_handler: &Handler, + module: &ModuleCodegen, + config: &ModuleConfig, + timeline: &mut Timeline, + ) -> Result<(), FatalError>; + unsafe fn optimize_thin( + cgcx: &CodegenContext, + thin: &mut ThinModule, + timeline: &mut Timeline, + ) -> Result, FatalError>; + unsafe fn codegen( + cgcx: &CodegenContext, + diag_handler: &Handler, + module: ModuleCodegen, + config: &ModuleConfig, + timeline: &mut Timeline, + ) -> Result; + fn run_lto_pass_manager( + cgcx: &CodegenContext, + llmod: &ModuleCodegen, + config: &ModuleConfig, + thin: bool, + ); +} + +pub trait ThinBufferMethods: Send + Sync { + fn data(&self) -> &[u8]; +} + +pub trait ModuleBufferMethods: Send + Sync { + fn data(&self) -> &[u8]; +} diff --git a/src/librustc_codegen_utils/Cargo.toml b/src/librustc_codegen_utils/Cargo.toml index a1f4a323f8..34a09f30b6 100644 --- a/src/librustc_codegen_utils/Cargo.toml +++ b/src/librustc_codegen_utils/Cargo.toml @@ -18,6 +18,6 @@ syntax_pos = { path = "../libsyntax_pos" } rustc = { path = "../librustc" } rustc_target = { path = "../librustc_target" } rustc_data_structures = { path = "../librustc_data_structures" } +rustc_metadata = { path = "../librustc_metadata" } rustc_mir = { path = "../librustc_mir" } rustc_incremental = { path = "../librustc_incremental" } -rustc_metadata_utils = { path = "../librustc_metadata_utils" } diff --git a/src/librustc_codegen_utils/lib.rs b/src/librustc_codegen_utils/lib.rs index 03b3b20a4e..ea8259d79a 100644 --- a/src/librustc_codegen_utils/lib.rs +++ b/src/librustc_codegen_utils/lib.rs @@ -33,12 +33,12 @@ extern crate log; #[macro_use] extern crate rustc; extern crate rustc_target; +extern crate rustc_metadata; extern crate rustc_mir; extern crate rustc_incremental; extern crate syntax; extern crate syntax_pos; #[macro_use] extern crate rustc_data_structures; -extern crate rustc_metadata_utils; use rustc::ty::TyCtxt; @@ -60,5 +60,3 @@ pub fn check_for_rustc_errors_attr(tcx: TyCtxt) { } } } - -__build_diagnostic_array! { librustc_codegen_utils, DIAGNOSTICS } diff --git a/src/librustc_codegen_utils/link.rs b/src/librustc_codegen_utils/link.rs index 66e98793f4..b11aa68732 100644 --- a/src/librustc_codegen_utils/link.rs +++ b/src/librustc_codegen_utils/link.rs @@ -13,7 +13,6 @@ use rustc::session::Session; use std::path::{Path, PathBuf}; use syntax::{ast, attr}; use syntax_pos::Span; -use rustc_metadata_utils::validate_crate_name; pub fn out_filename(sess: &Session, crate_type: config::CrateType, @@ -52,7 +51,7 @@ pub fn find_crate_name(sess: Option<&Session>, attrs: &[ast::Attribute], input: &Input) -> String { let validate = |s: String, span: Option| { - validate_crate_name(sess, &s, span); + ::rustc_metadata::validate_crate_name(sess, &s, span); s }; diff --git a/src/librustc_codegen_utils/symbol_names.rs b/src/librustc_codegen_utils/symbol_names.rs index c1e80234a7..344a252578 100644 --- a/src/librustc_codegen_utils/symbol_names.rs +++ b/src/librustc_codegen_utils/symbol_names.rs @@ -79,7 +79,7 @@ //! - In order to be able to also use symbols from two versions of the same //! crate (which naturally also have the same name), a stronger measure is //! required: The compiler accepts an arbitrary "disambiguator" value via the -//! `-C metadata` commandline argument. This disambiguator is then fed into +//! 
`-C metadata` command-line argument. This disambiguator is then fed into //! the symbol hash of every exported item. Consequently, the symbols in two //! identical crates but with different disambiguators are not in conflict //! with each other. This facility is mainly intended to be used by build @@ -114,6 +114,7 @@ use rustc_mir::monomorphize::Instance; use syntax_pos::symbol::Symbol; use std::fmt::Write; +use std::mem::discriminant; pub fn provide(providers: &mut Providers) { *providers = Providers { @@ -219,6 +220,10 @@ fn get_symbol_hash<'a, 'tcx>( .hash_stable(&mut hcx, &mut hasher); (&tcx.crate_disambiguator(instantiating_crate)).hash_stable(&mut hcx, &mut hasher); } + + // We want to avoid accidental collision between different types of instances. + // Especially, VtableShim may overlap with its original instance without this. + discriminant(&instance.def).hash_stable(&mut hcx, &mut hasher); }); // 64 bits should be enough to avoid collisions. @@ -252,9 +257,9 @@ fn compute_symbol_name<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, instance: Instance let disambiguator = tcx.sess.local_crate_disambiguator(); return tcx.sess.generate_plugin_registrar_symbol(disambiguator); } - if *tcx.sess.derive_registrar_fn.get() == Some(id) { + if *tcx.sess.proc_macro_decls_static.get() == Some(id) { let disambiguator = tcx.sess.local_crate_disambiguator(); - return tcx.sess.generate_derive_registrar_symbol(disambiguator); + return tcx.sess.generate_proc_macro_decls_symbol(disambiguator); } } @@ -322,7 +327,13 @@ fn compute_symbol_name<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, instance: Instance let hash = get_symbol_hash(tcx, def_id, instance, instance_ty, substs); - SymbolPathBuffer::from_interned(tcx.def_symbol_name(def_id)).finish(hash) + let mut buf = SymbolPathBuffer::from_interned(tcx.def_symbol_name(def_id)); + + if instance.is_vtable_shim() { + buf.push("{{vtable-shim}}"); + } + + buf.finish(hash) } // Follow C++ namespace-mangling style, see diff --git a/src/librustc_codegen_utils/symbol_names_test.rs b/src/librustc_codegen_utils/symbol_names_test.rs index 47bbd67fb5..6eaf0c1c08 100644 --- a/src/librustc_codegen_utils/symbol_names_test.rs +++ b/src/librustc_codegen_utils/symbol_names_test.rs @@ -32,7 +32,7 @@ pub fn report_symbol_names<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { } tcx.dep_graph.with_ignore(|| { - let mut visitor = SymbolNamesTest { tcx: tcx }; + let mut visitor = SymbolNamesTest { tcx }; tcx.hir.krate().visit_all_item_likes(&mut visitor); }) } diff --git a/src/librustc_cratesio_shim/src/lib.rs b/src/librustc_cratesio_shim/src/lib.rs index 1fe70fa23b..56e480208e 100644 --- a/src/librustc_cratesio_shim/src/lib.rs +++ b/src/librustc_cratesio_shim/src/lib.rs @@ -15,4 +15,5 @@ extern crate bitflags; extern crate log; +extern crate proc_macro; extern crate unicode_width; diff --git a/src/librustc_data_structures/Cargo.toml b/src/librustc_data_structures/Cargo.toml index 1082000762..188919d063 100644 --- a/src/librustc_data_structures/Cargo.toml +++ b/src/librustc_data_structures/Cargo.toml @@ -9,14 +9,13 @@ path = "lib.rs" crate-type = ["dylib"] [dependencies] -ena = "0.9.3" +ena = "0.11" log = "0.4" rustc_cratesio_shim = { path = "../librustc_cratesio_shim" } serialize = { path = "../libserialize" } graphviz = { path = "../libgraphviz" } cfg-if = "0.1.2" stable_deref_trait = "1.0.0" -parking_lot_core = "0.2.8" rustc-rayon = "0.1.1" rustc-rayon-core = "0.1.1" rustc-hash = "1.0.1" diff --git a/src/librustc_data_structures/flock.rs b/src/librustc_data_structures/flock.rs index 
38ce331051..86e48e2162 100644 --- a/src/librustc_data_structures/flock.rs +++ b/src/librustc_data_structures/flock.rs @@ -214,7 +214,7 @@ cfg_if! { unsafe { libc::close(fd); } Err(err) } else { - Ok(Lock { fd: fd }) + Ok(Lock { fd }) } } } diff --git a/src/librustc_data_structures/fx.rs b/src/librustc_data_structures/fx.rs index bce21f5085..7c7fc3a934 100644 --- a/src/librustc_data_structures/fx.rs +++ b/src/librustc_data_structures/fx.rs @@ -8,6 +8,4 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -pub use rustc_hash::FxHashMap; -pub use rustc_hash::FxHashSet; -pub use rustc_hash::FxHasher; +pub use rustc_hash::{FxHasher, FxHashMap, FxHashSet}; diff --git a/src/librustc_data_structures/graph/scc/mod.rs b/src/librustc_data_structures/graph/scc/mod.rs index a989a54010..64de0c2f56 100644 --- a/src/librustc_data_structures/graph/scc/mod.rs +++ b/src/librustc_data_structures/graph/scc/mod.rs @@ -38,7 +38,7 @@ struct SccData { /// successors can be found. ranges: IndexVec>, - /// Contains the succcessors for all the Sccs, concatenated. The + /// Contains the successors for all the Sccs, concatenated. The /// range of indices corresponding to a given SCC is found in its /// SccData. all_successors: Vec, diff --git a/src/librustc_data_structures/interner.rs b/src/librustc_data_structures/interner.rs new file mode 100644 index 0000000000..29e5aefee7 --- /dev/null +++ b/src/librustc_data_structures/interner.rs @@ -0,0 +1,68 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
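// Editor's sketch (not part of the patch): intended usage of the `HashInterner` trait
// defined just below. The map's unit values make it behave as a hash set keyed by the
// interned handles, and `intern_ref` hashes the lookup key only once even when it has
// to allocate. `typed_arena::Arena` stands in for any allocator with stable addresses
// and, like the `FxHashMap` import, is assumed here purely for illustration.
fn intern_str<'a>(
    interned: &mut FxHashMap<&'a str, ()>,
    arena: &'a typed_arena::Arena<String>,
    value: &str,
) -> &'a str {
    interned.intern_ref(value, || arena.alloc(value.to_owned()).as_str())
}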
+ +use std::hash::Hash; +use std::hash::BuildHasher; +use std::hash::Hasher; +use std::collections::HashMap; +use std::collections::hash_map::RawEntryMut; +use std::borrow::Borrow; + +pub trait HashInterner { + fn intern_ref K>(&mut self, value: &Q, make: F) -> K + where K: Borrow, + Q: Hash + Eq; + + fn intern K>(&mut self, value: Q, make: F) -> K + where K: Borrow, + Q: Hash + Eq; +} + +impl HashInterner for HashMap { + #[inline] + fn intern_ref K>(&mut self, value: &Q, make: F) -> K + where K: Borrow, + Q: Hash + Eq + { + let mut hasher = self.hasher().build_hasher(); + value.hash(&mut hasher); + let hash = hasher.finish(); + let entry = self.raw_entry_mut().from_key_hashed_nocheck(hash, value); + + match entry { + RawEntryMut::Occupied(e) => *e.key(), + RawEntryMut::Vacant(e) => { + let v = make(); + e.insert_hashed_nocheck(hash, v, ()); + v + } + } + } + + #[inline] + fn intern K>(&mut self, value: Q, make: F) -> K + where K: Borrow, + Q: Hash + Eq + { + let mut hasher = self.hasher().build_hasher(); + value.hash(&mut hasher); + let hash = hasher.finish(); + let entry = self.raw_entry_mut().from_key_hashed_nocheck(hash, &value); + + match entry { + RawEntryMut::Occupied(e) => *e.key(), + RawEntryMut::Vacant(e) => { + let v = make(value); + e.insert_hashed_nocheck(hash, v, ()); + v + } + } + } +} diff --git a/src/librustc_data_structures/lib.rs b/src/librustc_data_structures/lib.rs index b197d29b18..96cb235a93 100644 --- a/src/librustc_data_structures/lib.rs +++ b/src/librustc_data_structures/lib.rs @@ -21,7 +21,6 @@ html_root_url = "https://doc.rust-lang.org/nightly/")] #![feature(in_band_lifetimes)] -#![cfg_attr(stage0, feature(impl_header_lifetime_elision))] #![feature(unboxed_closures)] #![feature(fn_traits)] #![feature(unsize)] @@ -30,6 +29,7 @@ #![feature(nll)] #![feature(allow_internal_unstable)] #![feature(vec_resize_with)] +#![feature(hash_raw_entry)] #![cfg_attr(unix, feature(libc))] #![cfg_attr(test, feature(test))] @@ -58,6 +58,7 @@ extern crate rustc_cratesio_shim; pub use rustc_serialize::hex::ToHex; +pub mod macros; pub mod svh; pub mod base_n; pub mod bit_set; @@ -66,6 +67,7 @@ pub mod flock; pub mod fx; pub mod graph; pub mod indexed_vec; +pub mod interner; pub mod obligation_forest; pub mod owning_ref; pub mod ptr_key; diff --git a/src/librustc_data_structures/macros.rs b/src/librustc_data_structures/macros.rs new file mode 100644 index 0000000000..286a374b28 --- /dev/null +++ b/src/librustc_data_structures/macros.rs @@ -0,0 +1,22 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +/// A simple static assertion macro. The first argument should be a unique +/// ALL_CAPS identifier that describes the condition. +#[macro_export] +#[allow_internal_unstable] +macro_rules! static_assert { + ($name:ident: $test:expr) => { + // Use the bool to access an array such that if the bool is false, the access + // is out-of-bounds. 
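// Editor's note (illustrative, not part of the patch): a call such as
//     static_assert!(ONE_PLUS_ONE: 1 + 1 == 2);
// expands to a dead `static` whose initializer indexes a one-element array with
// `!(condition) as usize`, so a false condition becomes an out-of-bounds index and
// therefore a compile-time error.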
+ #[allow(dead_code)] + static $name: () = [()][!($test: bool) as usize]; + } +} diff --git a/src/librustc_data_structures/obligation_forest/mod.rs b/src/librustc_data_structures/obligation_forest/mod.rs index ccf2a7f815..c211d888df 100644 --- a/src/librustc_data_structures/obligation_forest/mod.rs +++ b/src/librustc_data_structures/obligation_forest/mod.rs @@ -162,8 +162,8 @@ enum NodeState { #[derive(Debug)] pub struct Outcome { /// Obligations that were completely evaluated, including all - /// (transitive) subobligations. - pub completed: Vec, + /// (transitive) subobligations. Only computed if requested. + pub completed: Option>, /// Backtrace of obligations that were found to be in error. pub errors: Vec>, @@ -177,6 +177,14 @@ pub struct Outcome { pub stalled: bool, } +/// Should `process_obligations` compute the `Outcome::completed` field of its +/// result? +#[derive(PartialEq)] +pub enum DoCompleted { + No, + Yes, +} + #[derive(Debug, PartialEq, Eq)] pub struct Error { pub error: E, @@ -282,8 +290,8 @@ impl ObligationForest { }); } } - let successful_obligations = self.compress(); - assert!(successful_obligations.is_empty()); + let successful_obligations = self.compress(DoCompleted::Yes); + assert!(successful_obligations.unwrap().is_empty()); errors } @@ -311,7 +319,8 @@ impl ObligationForest { /// be called in a loop until `outcome.stalled` is false. /// /// This CANNOT be unrolled (presently, at least). - pub fn process_obligations

<P>(&mut self, processor: &mut P) -> Outcome<O, P::Error> + pub fn process_obligations<P>

(&mut self, processor: &mut P, do_completed: DoCompleted) + -> Outcome where P: ObligationProcessor { debug!("process_obligations(len={})", self.nodes.len()); @@ -366,7 +375,7 @@ impl ObligationForest { // There's no need to perform marking, cycle processing and compression when nothing // changed. return Outcome { - completed: vec![], + completed: if do_completed == DoCompleted::Yes { Some(vec![]) } else { None }, errors, stalled, }; @@ -376,12 +385,12 @@ impl ObligationForest { self.process_cycles(processor); // Now we have to compress the result - let completed_obligations = self.compress(); + let completed = self.compress(do_completed); debug!("process_obligations: complete"); Outcome { - completed: completed_obligations, + completed, errors, stalled, } @@ -524,7 +533,7 @@ impl ObligationForest { /// Beforehand, all nodes must be marked as `Done` and no cycles /// on these nodes may be present. This is done by e.g. `process_cycles`. #[inline(never)] - fn compress(&mut self) -> Vec { + fn compress(&mut self, do_completed: DoCompleted) -> Option> { let nodes_len = self.nodes.len(); let mut node_rewrites: Vec<_> = self.scratch.take().unwrap(); node_rewrites.extend(0..nodes_len); @@ -573,21 +582,26 @@ impl ObligationForest { if dead_nodes == 0 { node_rewrites.truncate(0); self.scratch = Some(node_rewrites); - return vec![]; + return if do_completed == DoCompleted::Yes { Some(vec![]) } else { None }; } // Pop off all the nodes we killed and extract the success // stories. - let successful = (0..dead_nodes) - .map(|_| self.nodes.pop().unwrap()) - .flat_map(|node| { - match node.state.get() { - NodeState::Error => None, - NodeState::Done => Some(node.obligation), - _ => unreachable!() - } - }) - .collect(); + let successful = if do_completed == DoCompleted::Yes { + Some((0..dead_nodes) + .map(|_| self.nodes.pop().unwrap()) + .flat_map(|node| { + match node.state.get() { + NodeState::Error => None, + NodeState::Done => Some(node.obligation), + _ => unreachable!() + } + }) + .collect()) + } else { + self.nodes.truncate(self.nodes.len() - dead_nodes); + None + }; self.apply_rewrites(&node_rewrites); node_rewrites.truncate(0); diff --git a/src/librustc_data_structures/obligation_forest/test.rs b/src/librustc_data_structures/obligation_forest/test.rs index c27a65e343..2a418973fb 100644 --- a/src/librustc_data_structures/obligation_forest/test.rs +++ b/src/librustc_data_structures/obligation_forest/test.rs @@ -10,7 +10,7 @@ #![cfg(test)] -use super::{Error, ObligationForest, ObligationProcessor, Outcome, ProcessResult}; +use super::{Error, DoCompleted, ObligationForest, ObligationProcessor, Outcome, ProcessResult}; use std::fmt; use std::marker::PhantomData; @@ -84,8 +84,8 @@ fn push_pop() { "C" => ProcessResult::Changed(vec![]), _ => unreachable!(), } - }, |_| {})); - assert_eq!(ok, vec!["C"]); + }, |_| {}), DoCompleted::Yes); + assert_eq!(ok.unwrap(), vec!["C"]); assert_eq!(err, vec![Error { error: "B is for broken", @@ -108,8 +108,8 @@ fn push_pop() { "D" => ProcessResult::Changed(vec!["D.1", "D.2"]), _ => unreachable!(), } - }, |_| {})); - assert_eq!(ok, Vec::<&'static str>::new()); + }, |_| {}), DoCompleted::Yes); + assert_eq!(ok.unwrap(), Vec::<&'static str>::new()); assert_eq!(err, Vec::new()); @@ -127,8 +127,8 @@ fn push_pop() { "D.2" => ProcessResult::Changed(vec!["D.2.i"]), _ => unreachable!(), } - }, |_| {})); - assert_eq!(ok, vec!["A.3", "A.1", "A.3.i"]); + }, |_| {}), DoCompleted::Yes); + assert_eq!(ok.unwrap(), vec!["A.3", "A.1", "A.3.i"]); assert_eq!(err, vec![Error { error: "A is 
for apple", @@ -143,8 +143,8 @@ fn push_pop() { "D.2.i" => ProcessResult::Changed(vec![]), _ => panic!("unexpected obligation {:?}", obligation), } - }, |_| {})); - assert_eq!(ok, vec!["D.2.i", "D.2"]); + }, |_| {}), DoCompleted::Yes); + assert_eq!(ok.unwrap(), vec!["D.2.i", "D.2"]); assert_eq!(err, vec![Error { error: "D is for dumb", @@ -171,8 +171,8 @@ fn success_in_grandchildren() { "A" => ProcessResult::Changed(vec!["A.1", "A.2", "A.3"]), _ => unreachable!(), } - }, |_| {})); - assert!(ok.is_empty()); + }, |_| {}), DoCompleted::Yes); + assert!(ok.unwrap().is_empty()); assert!(err.is_empty()); let Outcome { completed: ok, errors: err, .. } = @@ -183,8 +183,8 @@ fn success_in_grandchildren() { "A.3" => ProcessResult::Changed(vec![]), _ => unreachable!(), } - }, |_| {})); - assert_eq!(ok, vec!["A.3", "A.1"]); + }, |_| {}), DoCompleted::Yes); + assert_eq!(ok.unwrap(), vec!["A.3", "A.1"]); assert!(err.is_empty()); let Outcome { completed: ok, errors: err, .. } = @@ -194,8 +194,8 @@ fn success_in_grandchildren() { "A.2.ii" => ProcessResult::Changed(vec![]), _ => unreachable!(), } - }, |_| {})); - assert_eq!(ok, vec!["A.2.ii"]); + }, |_| {}), DoCompleted::Yes); + assert_eq!(ok.unwrap(), vec!["A.2.ii"]); assert!(err.is_empty()); let Outcome { completed: ok, errors: err, .. } = @@ -204,14 +204,15 @@ fn success_in_grandchildren() { "A.2.i.a" => ProcessResult::Changed(vec![]), _ => unreachable!(), } - }, |_| {})); - assert_eq!(ok, vec!["A.2.i.a", "A.2.i", "A.2", "A"]); + }, |_| {}), DoCompleted::Yes); + assert_eq!(ok.unwrap(), vec!["A.2.i.a", "A.2.i", "A.2", "A"]); assert!(err.is_empty()); let Outcome { completed: ok, errors: err, .. } = - forest.process_obligations(&mut C(|_| unreachable!(), |_| {})); + forest.process_obligations(&mut C(|_| unreachable!(), |_| {}), + DoCompleted::Yes); - assert!(ok.is_empty()); + assert!(ok.unwrap().is_empty()); assert!(err.is_empty()); } @@ -227,8 +228,8 @@ fn to_errors_no_throw() { "A" => ProcessResult::Changed(vec!["A.1", "A.2", "A.3"]), _ => unreachable!(), } - }, |_|{})); - assert_eq!(ok.len(), 0); + }, |_|{}), DoCompleted::Yes); + assert_eq!(ok.unwrap().len(), 0); assert_eq!(err.len(), 0); let errors = forest.to_errors(()); assert_eq!(errors[0].backtrace, vec!["A.1", "A"]); @@ -248,8 +249,8 @@ fn diamond() { "A" => ProcessResult::Changed(vec!["A.1", "A.2"]), _ => unreachable!(), } - }, |_|{})); - assert_eq!(ok.len(), 0); + }, |_|{}), DoCompleted::Yes); + assert_eq!(ok.unwrap().len(), 0); assert_eq!(err.len(), 0); let Outcome { completed: ok, errors: err, .. } = @@ -259,8 +260,8 @@ fn diamond() { "A.2" => ProcessResult::Changed(vec!["D"]), _ => unreachable!(), } - }, |_|{})); - assert_eq!(ok.len(), 0); + }, |_|{}), DoCompleted::Yes); + assert_eq!(ok.unwrap().len(), 0); assert_eq!(err.len(), 0); let mut d_count = 0; @@ -270,9 +271,9 @@ fn diamond() { "D" => { d_count += 1; ProcessResult::Changed(vec![]) }, _ => unreachable!(), } - }, |_|{})); + }, |_|{}), DoCompleted::Yes); assert_eq!(d_count, 1); - assert_eq!(ok, vec!["D", "A.2", "A.1", "A"]); + assert_eq!(ok.unwrap(), vec!["D", "A.2", "A.1", "A"]); assert_eq!(err.len(), 0); let errors = forest.to_errors(()); @@ -285,8 +286,8 @@ fn diamond() { "A'" => ProcessResult::Changed(vec!["A'.1", "A'.2"]), _ => unreachable!(), } - }, |_|{})); - assert_eq!(ok.len(), 0); + }, |_|{}), DoCompleted::Yes); + assert_eq!(ok.unwrap().len(), 0); assert_eq!(err.len(), 0); let Outcome { completed: ok, errors: err, .. 
} = @@ -296,8 +297,8 @@ fn diamond() { "A'.2" => ProcessResult::Changed(vec!["D'"]), _ => unreachable!(), } - }, |_|{})); - assert_eq!(ok.len(), 0); + }, |_|{}), DoCompleted::Yes); + assert_eq!(ok.unwrap().len(), 0); assert_eq!(err.len(), 0); let mut d_count = 0; @@ -307,9 +308,9 @@ fn diamond() { "D'" => { d_count += 1; ProcessResult::Error("operation failed") }, _ => unreachable!(), } - }, |_|{})); + }, |_|{}), DoCompleted::Yes); assert_eq!(d_count, 1); - assert_eq!(ok.len(), 0); + assert_eq!(ok.unwrap().len(), 0); assert_eq!(err, vec![super::Error { error: "operation failed", backtrace: vec!["D'", "A'.1", "A'"] @@ -333,8 +334,8 @@ fn done_dependency() { "A: Sized" | "B: Sized" | "C: Sized" => ProcessResult::Changed(vec![]), _ => unreachable!(), } - }, |_|{})); - assert_eq!(ok, vec!["C: Sized", "B: Sized", "A: Sized"]); + }, |_|{}), DoCompleted::Yes); + assert_eq!(ok.unwrap(), vec!["C: Sized", "B: Sized", "A: Sized"]); assert_eq!(err.len(), 0); forest.register_obligation("(A,B,C): Sized"); @@ -348,8 +349,8 @@ fn done_dependency() { ]), _ => unreachable!(), } - }, |_|{})); - assert_eq!(ok, vec!["(A,B,C): Sized"]); + }, |_|{}), DoCompleted::Yes); + assert_eq!(ok.unwrap(), vec!["(A,B,C): Sized"]); assert_eq!(err.len(), 0); } @@ -371,8 +372,8 @@ fn orphan() { "C2" => ProcessResult::Changed(vec![]), _ => unreachable!(), } - }, |_|{})); - assert_eq!(ok, vec!["C2", "C1"]); + }, |_|{}), DoCompleted::Yes); + assert_eq!(ok.unwrap(), vec!["C2", "C1"]); assert_eq!(err.len(), 0); let Outcome { completed: ok, errors: err, .. } = @@ -382,8 +383,8 @@ fn orphan() { "B" => ProcessResult::Changed(vec!["D"]), _ => unreachable!(), } - }, |_|{})); - assert_eq!(ok.len(), 0); + }, |_|{}), DoCompleted::Yes); + assert_eq!(ok.unwrap().len(), 0); assert_eq!(err.len(), 0); let Outcome { completed: ok, errors: err, .. } = @@ -393,8 +394,8 @@ fn orphan() { "E" => ProcessResult::Error("E is for error"), _ => unreachable!(), } - }, |_|{})); - assert_eq!(ok.len(), 0); + }, |_|{}), DoCompleted::Yes); + assert_eq!(ok.unwrap().len(), 0); assert_eq!(err, vec![super::Error { error: "E is for error", backtrace: vec!["E", "A"] @@ -406,8 +407,8 @@ fn orphan() { "D" => ProcessResult::Error("D is dead"), _ => unreachable!(), } - }, |_|{})); - assert_eq!(ok.len(), 0); + }, |_|{}), DoCompleted::Yes); + assert_eq!(ok.unwrap().len(), 0); assert_eq!(err, vec![super::Error { error: "D is dead", backtrace: vec!["D"] @@ -431,8 +432,8 @@ fn simultaneous_register_and_error() { "B" => ProcessResult::Changed(vec!["A"]), _ => unreachable!(), } - }, |_|{})); - assert_eq!(ok.len(), 0); + }, |_|{}), DoCompleted::Yes); + assert_eq!(ok.unwrap().len(), 0); assert_eq!(err, vec![super::Error { error: "An error", backtrace: vec!["A"] @@ -449,8 +450,8 @@ fn simultaneous_register_and_error() { "B" => ProcessResult::Changed(vec!["A"]), _ => unreachable!(), } - }, |_|{})); - assert_eq!(ok.len(), 0); + }, |_|{}), DoCompleted::Yes); + assert_eq!(ok.unwrap().len(), 0); assert_eq!(err, vec![super::Error { error: "An error", backtrace: vec!["A"] diff --git a/src/librustc_data_structures/owning_ref/mod.rs b/src/librustc_data_structures/owning_ref/mod.rs index 02640a7101..27c2f8b718 100644 --- a/src/librustc_data_structures/owning_ref/mod.rs +++ b/src/librustc_data_structures/owning_ref/mod.rs @@ -452,7 +452,7 @@ impl OwningRef { /// use owning_ref::{OwningRef, Erased}; /// /// fn main() { - /// // NB: Using the concrete types here for explicitnes. + /// // NB: Using the concrete types here for explicitness. 
/// // For less verbose code type aliases like `BoxRef` are provided. /// /// let owning_ref_a: OwningRef, [i32; 4]> @@ -722,7 +722,7 @@ impl OwningRefMut { /// use owning_ref::{OwningRefMut, Erased}; /// /// fn main() { - /// // NB: Using the concrete types here for explicitnes. + /// // NB: Using the concrete types here for explicitness. /// // For less verbose code type aliases like `BoxRef` are provided. /// /// let owning_ref_mut_a: OwningRefMut, [i32; 4]> diff --git a/src/librustc_data_structures/snapshot_map/mod.rs b/src/librustc_data_structures/snapshot_map/mod.rs index f8663cd4fd..c256506a19 100644 --- a/src/librustc_data_structures/snapshot_map/mod.rs +++ b/src/librustc_data_structures/snapshot_map/mod.rs @@ -21,6 +21,20 @@ pub struct SnapshotMap { map: FxHashMap, undo_log: Vec>, + num_open_snapshots: usize, +} + +// HACK(eddyb) manual impl avoids `Default` bounds on `K` and `V`. +impl Default for SnapshotMap + where K: Hash + Clone + Eq +{ + fn default() -> Self { + SnapshotMap { + map: Default::default(), + undo_log: Default::default(), + num_open_snapshots: 0, + } + } } pub struct Snapshot { @@ -28,22 +42,9 @@ pub struct Snapshot { } enum UndoLog { - OpenSnapshot, - CommittedSnapshot, Inserted(K), Overwrite(K, V), - Noop, -} - -impl Default for SnapshotMap - where K: Hash + Clone + Eq -{ - fn default() -> Self { - SnapshotMap { - map: FxHashMap::default(), - undo_log: vec![], - } - } + Purged, } impl SnapshotMap @@ -52,18 +53,23 @@ impl SnapshotMap pub fn clear(&mut self) { self.map.clear(); self.undo_log.clear(); + self.num_open_snapshots = 0; + } + + fn in_snapshot(&self) -> bool { + self.num_open_snapshots > 0 } pub fn insert(&mut self, key: K, value: V) -> bool { match self.map.insert(key.clone(), value) { None => { - if !self.undo_log.is_empty() { + if self.in_snapshot() { self.undo_log.push(UndoLog::Inserted(key)); } true } Some(old_value) => { - if !self.undo_log.is_empty() { + if self.in_snapshot() { self.undo_log.push(UndoLog::Overwrite(key, old_value)); } false @@ -71,16 +77,10 @@ impl SnapshotMap } } - pub fn insert_noop(&mut self) { - if !self.undo_log.is_empty() { - self.undo_log.push(UndoLog::Noop); - } - } - pub fn remove(&mut self, key: K) -> bool { match self.map.remove(&key) { Some(old_value) => { - if !self.undo_log.is_empty() { + if self.in_snapshot() { self.undo_log.push(UndoLog::Overwrite(key, old_value)); } true @@ -94,27 +94,27 @@ impl SnapshotMap } pub fn snapshot(&mut self) -> Snapshot { - self.undo_log.push(UndoLog::OpenSnapshot); - let len = self.undo_log.len() - 1; + let len = self.undo_log.len(); + self.num_open_snapshots += 1; Snapshot { len } } fn assert_open_snapshot(&self, snapshot: &Snapshot) { - assert!(snapshot.len < self.undo_log.len()); - assert!(match self.undo_log[snapshot.len] { - UndoLog::OpenSnapshot => true, - _ => false, - }); + assert!(self.undo_log.len() >= snapshot.len); + assert!(self.num_open_snapshots > 0); } - pub fn commit(&mut self, snapshot: &Snapshot) { - self.assert_open_snapshot(snapshot); - if snapshot.len == 0 { - // The root snapshot. - self.undo_log.truncate(0); - } else { - self.undo_log[snapshot.len] = UndoLog::CommittedSnapshot; + pub fn commit(&mut self, snapshot: Snapshot) { + self.assert_open_snapshot(&snapshot); + if self.num_open_snapshots == 1 { + // The root snapshot. It's safe to clear the undo log because + // there's no snapshot further out that we might need to roll back + // to. 
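For context on the SnapshotMap rework in these hunks: open snapshots are now counted in `num_open_snapshots` instead of being recorded as `OpenSnapshot`/`CommittedSnapshot` entries in the undo log, and `commit`/`rollback_to` consume the `Snapshot` token so each snapshot can be finished exactly once. A minimal usage sketch (not part of the patch), mirroring the updated tests further below and assuming `use rustc_data_structures::snapshot_map::SnapshotMap;`:

let mut map = SnapshotMap::default();
map.insert(22, "twenty-two");

let snap = map.snapshot();       // bumps num_open_snapshots, remembers undo_log.len()
map.insert(44, "forty-four");

map.rollback_to(snap);           // consumes the token, undoes entries past snap.len
assert_eq!(map.get(&44), None);
assert_eq!(map[&22], "twenty-two");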
+ assert!(snapshot.len == 0); + self.undo_log.clear(); } + + self.num_open_snapshots -= 1; } pub fn partial_rollback(&mut self, @@ -123,45 +123,32 @@ impl SnapshotMap where F: Fn(&K) -> bool { self.assert_open_snapshot(snapshot); - for i in (snapshot.len + 1..self.undo_log.len()).rev() { + for i in (snapshot.len .. self.undo_log.len()).rev() { let reverse = match self.undo_log[i] { - UndoLog::OpenSnapshot => false, - UndoLog::CommittedSnapshot => false, - UndoLog::Noop => false, + UndoLog::Purged => false, UndoLog::Inserted(ref k) => should_revert_key(k), UndoLog::Overwrite(ref k, _) => should_revert_key(k), }; if reverse { - let entry = mem::replace(&mut self.undo_log[i], UndoLog::Noop); + let entry = mem::replace(&mut self.undo_log[i], UndoLog::Purged); self.reverse(entry); } } } - pub fn rollback_to(&mut self, snapshot: &Snapshot) { - self.assert_open_snapshot(snapshot); - while self.undo_log.len() > snapshot.len + 1 { + pub fn rollback_to(&mut self, snapshot: Snapshot) { + self.assert_open_snapshot(&snapshot); + while self.undo_log.len() > snapshot.len { let entry = self.undo_log.pop().unwrap(); self.reverse(entry); } - let v = self.undo_log.pop().unwrap(); - assert!(match v { - UndoLog::OpenSnapshot => true, - _ => false, - }); - assert!(self.undo_log.len() == snapshot.len); + self.num_open_snapshots -= 1; } fn reverse(&mut self, entry: UndoLog) { match entry { - UndoLog::OpenSnapshot => { - panic!("cannot rollback an uncommitted snapshot"); - } - - UndoLog::CommittedSnapshot => {} - UndoLog::Inserted(key) => { self.map.remove(&key); } @@ -170,7 +157,7 @@ impl SnapshotMap self.map.insert(key, old_value); } - UndoLog::Noop => {} + UndoLog::Purged => {} } } } diff --git a/src/librustc_data_structures/snapshot_map/test.rs b/src/librustc_data_structures/snapshot_map/test.rs index 700f9c95e3..b4ecb85fc4 100644 --- a/src/librustc_data_structures/snapshot_map/test.rs +++ b/src/librustc_data_structures/snapshot_map/test.rs @@ -17,10 +17,10 @@ fn basic() { let snapshot = map.snapshot(); map.insert(22, "thirty-three"); assert_eq!(map[&22], "thirty-three"); - map.insert(44, "fourty-four"); - assert_eq!(map[&44], "fourty-four"); + map.insert(44, "forty-four"); + assert_eq!(map[&44], "forty-four"); assert_eq!(map.get(&33), None); - map.rollback_to(&snapshot); + map.rollback_to(snapshot); assert_eq!(map[&22], "twenty-two"); assert_eq!(map.get(&33), None); assert_eq!(map.get(&44), None); @@ -32,8 +32,11 @@ fn out_of_order() { let mut map = SnapshotMap::default(); map.insert(22, "twenty-two"); let snapshot1 = map.snapshot(); - let _snapshot2 = map.snapshot(); - map.rollback_to(&snapshot1); + map.insert(33, "thirty-three"); + let snapshot2 = map.snapshot(); + map.insert(44, "forty-four"); + map.rollback_to(snapshot1); // bogus, but accepted + map.rollback_to(snapshot2); // asserts } #[test] @@ -43,8 +46,8 @@ fn nested_commit_then_rollback() { let snapshot1 = map.snapshot(); let snapshot2 = map.snapshot(); map.insert(22, "thirty-three"); - map.commit(&snapshot2); + map.commit(snapshot2); assert_eq!(map[&22], "thirty-three"); - map.rollback_to(&snapshot1); + map.rollback_to(snapshot1); assert_eq!(map[&22], "twenty-two"); } diff --git a/src/librustc_data_structures/sorted_map.rs b/src/librustc_data_structures/sorted_map.rs index 730b13a058..29d99a6aef 100644 --- a/src/librustc_data_structures/sorted_map.rs +++ b/src/librustc_data_structures/sorted_map.rs @@ -15,7 +15,7 @@ use std::mem; use std::ops::{RangeBounds, Bound, Index, IndexMut}; /// `SortedMap` is a data structure with similar characteristics as 
BTreeMap but -/// slightly different trade-offs: lookup, inseration, and removal are O(log(N)) +/// slightly different trade-offs: lookup, insertion, and removal are O(log(N)) /// and elements can be iterated in order cheaply. /// /// `SortedMap` can be faster than a `BTreeMap` for small sizes (<50) since it diff --git a/src/librustc_data_structures/svh.rs b/src/librustc_data_structures/svh.rs index 94f132562b..3d17824608 100644 --- a/src/librustc_data_structures/svh.rs +++ b/src/librustc_data_structures/svh.rs @@ -31,7 +31,7 @@ impl Svh { /// compute the SVH from some HIR, you want the `calculate_svh` /// function found in `librustc_incremental`. pub fn new(hash: u64) -> Svh { - Svh { hash: hash } + Svh { hash } } pub fn as_u64(&self) -> u64 { diff --git a/src/librustc_data_structures/tiny_list.rs b/src/librustc_data_structures/tiny_list.rs index e1bfdf35b2..9dbf0ea9f4 100644 --- a/src/librustc_data_structures/tiny_list.rs +++ b/src/librustc_data_structures/tiny_list.rs @@ -22,8 +22,6 @@ //! If you expect to store more than 1 element in the common case, steer clear //! and use a `Vec`, `Box<[T]>`, or a `SmallVec`. -use std::mem; - #[derive(Clone, Hash, Debug, PartialEq)] pub struct TinyList { head: Option> @@ -52,7 +50,7 @@ impl TinyList { pub fn insert(&mut self, data: T) { self.head = Some(Element { data, - next: mem::replace(&mut self.head, None).map(Box::new), + next: self.head.take().map(Box::new) }); } @@ -60,7 +58,7 @@ impl TinyList { pub fn remove(&mut self, data: &T) -> bool { self.head = match self.head { Some(ref mut head) if head.data == *data => { - mem::replace(&mut head.next, None).map(|x| *x) + head.next.take().map(|x| *x) } Some(ref mut head) => return head.remove_next(data), None => return false, @@ -100,7 +98,7 @@ impl Element { if next.data != *data { return next.remove_next(data) } else { - mem::replace(&mut next.next, None) + next.next.take() } } else { return false diff --git a/src/librustc_data_structures/transitive_relation.rs b/src/librustc_data_structures/transitive_relation.rs index e1318eb54d..fd5dfab9e6 100644 --- a/src/librustc_data_structures/transitive_relation.rs +++ b/src/librustc_data_structures/transitive_relation.rs @@ -42,6 +42,18 @@ pub struct TransitiveRelation { closure: Lock>>, } +// HACK(eddyb) manual impl avoids `Default` bound on `T`. 
+impl Default for TransitiveRelation { + fn default() -> Self { + TransitiveRelation { + elements: Default::default(), + map: Default::default(), + edges: Default::default(), + closure: Default::default(), + } + } +} + #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable, Debug)] struct Index(usize); @@ -51,17 +63,6 @@ struct Edge { target: Index, } -impl Default for TransitiveRelation { - fn default() -> TransitiveRelation { - TransitiveRelation { - elements: vec![], - map: FxHashMap::default(), - edges: vec![], - closure: Lock::new(None), - } - } -} - impl TransitiveRelation { pub fn is_empty(&self) -> bool { self.edges.is_empty() diff --git a/src/librustc_driver/Cargo.toml b/src/librustc_driver/Cargo.toml index 470c8b03d0..1e32f5ef6f 100644 --- a/src/librustc_driver/Cargo.toml +++ b/src/librustc_driver/Cargo.toml @@ -38,3 +38,8 @@ syntax = { path = "../libsyntax" } smallvec = { version = "0.6.5", features = ["union"] } syntax_ext = { path = "../libsyntax_ext" } syntax_pos = { path = "../libsyntax_pos" } + +[dependencies.jemalloc-sys] +version = '0.1.8' +optional = true +features = ['unprefixed_malloc_on_supported_platforms'] diff --git a/src/librustc_driver/README.md b/src/librustc_driver/README.md index fef249a9e4..c4d73953e9 100644 --- a/src/librustc_driver/README.md +++ b/src/librustc_driver/README.md @@ -7,4 +7,4 @@ options). For more information about how the driver works, see the [rustc guide]. -[rustc guide]: https://rust-lang-nursery.github.io/rustc-guide/rustc-driver.html +[rustc guide]: https://rust-lang.github.io/rustc-guide/rustc-driver.html diff --git a/src/librustc_driver/driver.rs b/src/librustc_driver/driver.rs index 07803d0659..777cc09b8b 100644 --- a/src/librustc_driver/driver.rs +++ b/src/librustc_driver/driver.rs @@ -62,7 +62,7 @@ use syntax::symbol::Symbol; use syntax_pos::{FileName, hygiene}; use syntax_ext; -use derive_registrar; +use proc_macro_decls; use pretty::ReplaceBodyWithLoop; use profile; @@ -1066,7 +1066,7 @@ where let num_crate_types = crate_types.len(); let is_proc_macro_crate = crate_types.contains(&config::CrateType::ProcMacro); let is_test_crate = sess.opts.test; - syntax_ext::proc_macro_registrar::modify( + syntax_ext::proc_macro_decls::modify( &sess.parse_sess, &mut resolver, krate, @@ -1243,8 +1243,8 @@ where .set(time(sess, "looking for plugin registrar", || { plugin::build::find_plugin_registrar(sess.diagnostic(), &hir_map) })); - sess.derive_registrar_fn - .set(derive_registrar::find(&hir_map)); + sess.proc_macro_decls_static + .set(proc_macro_decls::find(&hir_map)); time(sess, "loop checking", || loops::check_crate(sess, &hir_map)); diff --git a/src/librustc_driver/lib.rs b/src/librustc_driver/lib.rs index 9a0d461444..061c19eca3 100644 --- a/src/librustc_driver/lib.rs +++ b/src/librustc_driver/lib.rs @@ -64,6 +64,14 @@ extern crate syntax; extern crate syntax_ext; extern crate syntax_pos; +// Note that the linkage here should be all that we need, on Linux we're not +// prefixing the symbols here so this should naturally override our default +// allocator. On OSX it should override via the zone allocator. We shouldn't +// enable this by default on other platforms, so other platforms aren't handled +// here yet. 
+#[cfg(feature = "jemalloc-sys")] +extern crate jemalloc_sys; + use driver::CompileController; use pretty::{PpMode, UserIdentifiedItem}; @@ -119,7 +127,7 @@ mod test; pub mod profile; pub mod driver; pub mod pretty; -mod derive_registrar; +mod proc_macro_decls; pub mod target_features { use syntax::ast; @@ -635,8 +643,8 @@ impl Compilation { } } -/// A trait for customising the compilation process. Offers a number of hooks for -/// executing custom code or customising input. +/// A trait for customizing the compilation process. Offers a number of hooks for +/// executing custom code or customizing input. pub trait CompilerCalls<'a> { /// Hook for a callback early in the process of handling arguments. This will /// be called straight after options have been parsed but before anything @@ -944,7 +952,7 @@ impl<'a> CompilerCalls<'a> for RustcDefaultCalls { control.compilation_done.callback = box move |state| { old_callback(state); let sess = state.session; - println!("Fuel used by {}: {}", + eprintln!("Fuel used by {}: {}", sess.print_fuel_crate.as_ref().unwrap(), sess.print_fuel.get()); } @@ -1691,7 +1699,6 @@ pub fn diagnostics_registry() -> errors::registry::Registry { all_errors.extend_from_slice(&rustc_privacy::DIAGNOSTICS); // FIXME: need to figure out a way to get these back in here // all_errors.extend_from_slice(get_codegen_backend(sess).diagnostics()); - all_errors.extend_from_slice(&rustc_codegen_utils::DIAGNOSTICS); all_errors.extend_from_slice(&rustc_metadata::DIAGNOSTICS); all_errors.extend_from_slice(&rustc_passes::DIAGNOSTICS); all_errors.extend_from_slice(&rustc_plugin::DIAGNOSTICS); diff --git a/src/librustc_driver/pretty.rs b/src/librustc_driver/pretty.rs index b4f6d10b1f..fb8093d1d7 100644 --- a/src/librustc_driver/pretty.rs +++ b/src/librustc_driver/pretty.rs @@ -425,7 +425,7 @@ impl<'hir> pprust_hir::PpAnn for IdentifiedAnnotation<'hir> { pprust_hir::AnnNode::Item(item) => { s.s.space()?; s.synth_comment(format!("node_id: {} hir local_id: {}", - item.id, item.hir_id.local_id.0)) + item.id, item.hir_id.local_id.as_u32())) } pprust_hir::AnnNode::SubItem(id) => { s.s.space()?; @@ -434,18 +434,18 @@ impl<'hir> pprust_hir::PpAnn for IdentifiedAnnotation<'hir> { pprust_hir::AnnNode::Block(blk) => { s.s.space()?; s.synth_comment(format!("block node_id: {} hir local_id: {}", - blk.id, blk.hir_id.local_id.0)) + blk.id, blk.hir_id.local_id.as_u32())) } pprust_hir::AnnNode::Expr(expr) => { s.s.space()?; s.synth_comment(format!("node_id: {} hir local_id: {}", - expr.id, expr.hir_id.local_id.0))?; + expr.id, expr.hir_id.local_id.as_u32()))?; s.pclose() } pprust_hir::AnnNode::Pat(pat) => { s.s.space()?; s.synth_comment(format!("pat node_id: {} hir local_id: {}", - pat.id, pat.hir_id.local_id.0)) + pat.id, pat.hir_id.local_id.as_u32())) } } } @@ -530,7 +530,7 @@ impl<'a, 'tcx> pprust_hir::PpAnn for TypedAnnotation<'a, 'tcx> { s.s.space()?; s.s.word("as")?; s.s.space()?; - s.s.word(&self.tables.get().expr_ty(expr).to_string())?; + s.s.word(self.tables.get().expr_ty(expr).to_string())?; s.pclose() } _ => Ok(()), @@ -566,7 +566,7 @@ impl FromStr for UserIdentifiedItem { type Err = (); fn from_str(s: &str) -> Result { Ok(s.parse() - .map(ast::NodeId::new) + .map(ast::NodeId::from_u32) .map(ItemViaNode) .unwrap_or_else(|_| ItemViaPath(s.split("::").map(|s| s.to_string()).collect()))) } diff --git a/src/librustc_driver/derive_registrar.rs b/src/librustc_driver/proc_macro_decls.rs similarity index 81% rename from src/librustc_driver/derive_registrar.rs rename to 
src/librustc_driver/proc_macro_decls.rs index 9983efce6a..136a27b1ce 100644 --- a/src/librustc_driver/derive_registrar.rs +++ b/src/librustc_driver/proc_macro_decls.rs @@ -17,19 +17,19 @@ use syntax::attr; pub fn find(hir_map: &Map) -> Option { let krate = hir_map.krate(); - let mut finder = Finder { registrar: None }; + let mut finder = Finder { decls: None }; krate.visit_all_item_likes(&mut finder); - finder.registrar + finder.decls } struct Finder { - registrar: Option, + decls: Option, } impl<'v> ItemLikeVisitor<'v> for Finder { fn visit_item(&mut self, item: &hir::Item) { - if attr::contains_name(&item.attrs, "rustc_derive_registrar") { - self.registrar = Some(item.id); + if attr::contains_name(&item.attrs, "rustc_proc_macro_decls") { + self.decls = Some(item.id); } } diff --git a/src/librustc_driver/test.rs b/src/librustc_driver/test.rs index f18f40bf7a..8865c7e438 100644 --- a/src/librustc_driver/test.rs +++ b/src/librustc_driver/test.rs @@ -232,20 +232,20 @@ impl<'a, 'gcx, 'tcx> Env<'a, 'gcx, 'tcx> { // children of 1, etc let dscope = region::Scope { - id: hir::ItemLocalId(1), + id: hir::ItemLocalId::from_u32(1), data: region::ScopeData::Destruction, }; self.region_scope_tree.record_scope_parent(dscope, None); self.create_region_hierarchy( &RH { - id: hir::ItemLocalId(1), + id: hir::ItemLocalId::from_u32(1), sub: &[ RH { - id: hir::ItemLocalId(10), + id: hir::ItemLocalId::from_u32(10), sub: &[], }, RH { - id: hir::ItemLocalId(11), + id: hir::ItemLocalId::from_u32(11), sub: &[], }, ], @@ -400,7 +400,7 @@ impl<'a, 'gcx, 'tcx> Env<'a, 'gcx, 'tcx> { pub fn t_rptr_scope(&self, id: u32) -> Ty<'tcx> { let r = ty::ReScope(region::Scope { - id: hir::ItemLocalId(id), + id: hir::ItemLocalId::from_u32(id), data: region::ScopeData::Node, }); self.infcx @@ -616,22 +616,22 @@ fn escaping() { // Theta = [A -> &'a foo] env.create_simple_region_hierarchy(); - assert!(!env.t_nil().has_escaping_regions()); + assert!(!env.t_nil().has_escaping_bound_vars()); let t_rptr_free1 = env.t_rptr_free(1); - assert!(!t_rptr_free1.has_escaping_regions()); + assert!(!t_rptr_free1.has_escaping_bound_vars()); let t_rptr_bound1 = env.t_rptr_late_bound_with_debruijn(1, d1()); - assert!(t_rptr_bound1.has_escaping_regions()); + assert!(t_rptr_bound1.has_escaping_bound_vars()); let t_rptr_bound2 = env.t_rptr_late_bound_with_debruijn(1, d2()); - assert!(t_rptr_bound2.has_escaping_regions()); + assert!(t_rptr_bound2.has_escaping_bound_vars()); // t_fn = fn(A) let t_param = env.t_param(0); - assert!(!t_param.has_escaping_regions()); + assert!(!t_param.has_escaping_bound_vars()); let t_fn = env.t_fn(&[t_param], env.t_nil()); - assert!(!t_fn.has_escaping_regions()); + assert!(!t_fn.has_escaping_bound_vars()); }) } diff --git a/src/librustc_errors/diagnostic.rs b/src/librustc_errors/diagnostic.rs index 870eeadc08..ea425ad4c4 100644 --- a/src/librustc_errors/diagnostic.rs +++ b/src/librustc_errors/diagnostic.rs @@ -139,6 +139,17 @@ impl Diagnostic { self } + pub fn replace_span_with(&mut self, after: Span) -> &mut Self { + let before = self.span.clone(); + self.set_span(after); + for span_label in before.span_labels() { + if let Some(label) = span_label.label { + self.span_label(after, label); + } + } + self + } + pub fn note_expected_found(&mut self, label: &dyn fmt::Display, expected: DiagnosticStyledString, @@ -350,10 +361,10 @@ impl Diagnostic { } pub fn span_suggestions_with_applicability(&mut self, sp: Span, msg: &str, - suggestions: Vec, - applicability: Applicability) -> &mut Self { + suggestions: impl Iterator, 
applicability: Applicability) -> &mut Self + { self.suggestions.push(CodeSuggestion { - substitutions: suggestions.into_iter().map(|snippet| Substitution { + substitutions: suggestions.map(|snippet| Substitution { parts: vec![SubstitutionPart { snippet, span: sp, diff --git a/src/librustc_errors/diagnostic_builder.rs b/src/librustc_errors/diagnostic_builder.rs index f4289ea2d4..2f16470530 100644 --- a/src/librustc_errors/diagnostic_builder.rs +++ b/src/librustc_errors/diagnostic_builder.rs @@ -253,7 +253,7 @@ impl<'a> DiagnosticBuilder<'a> { pub fn span_suggestions_with_applicability(&mut self, sp: Span, msg: &str, - suggestions: Vec, + suggestions: impl Iterator, applicability: Applicability) -> &mut Self { if !self.allow_suggestions { diff --git a/src/librustc_errors/emitter.rs b/src/librustc_errors/emitter.rs index 5f275b7003..7e69e98071 100644 --- a/src/librustc_errors/emitter.rs +++ b/src/librustc_errors/emitter.rs @@ -108,7 +108,13 @@ pub enum ColorConfig { impl ColorConfig { fn to_color_choice(&self) -> ColorChoice { match *self { - ColorConfig::Always => ColorChoice::Always, + ColorConfig::Always => { + if atty::is(atty::Stream::Stderr) { + ColorChoice::Always + } else { + ColorChoice::AlwaysAnsi + } + } ColorConfig::Never => ColorChoice::Never, ColorConfig::Auto if atty::is(atty::Stream::Stderr) => { ColorChoice::Auto @@ -120,7 +126,7 @@ impl ColorConfig { pub struct EmitterWriter { dst: Destination, - cm: Option>, + sm: Option>, short_message: bool, teach: bool, ui_testing: bool, @@ -134,14 +140,14 @@ struct FileWithAnnotatedLines { impl EmitterWriter { pub fn stderr(color_config: ColorConfig, - code_map: Option>, + source_map: Option>, short_message: bool, teach: bool) -> EmitterWriter { let dst = Destination::from_stderr(color_config); EmitterWriter { dst, - cm: code_map, + sm: source_map, short_message, teach, ui_testing: false, @@ -149,13 +155,13 @@ impl EmitterWriter { } pub fn new(dst: Box, - code_map: Option>, + source_map: Option>, short_message: bool, teach: bool) -> EmitterWriter { EmitterWriter { dst: Raw(dst), - cm: code_map, + sm: source_map, short_message, teach, ui_testing: false, @@ -214,14 +220,14 @@ impl EmitterWriter { let mut output = vec![]; let mut multiline_annotations = vec![]; - if let Some(ref cm) = self.cm { + if let Some(ref sm) = self.sm { for span_label in msp.span_labels() { if span_label.span.is_dummy() { continue; } - let lo = cm.lookup_char_pos(span_label.span.lo()); - let mut hi = cm.lookup_char_pos(span_label.span.hi()); + let lo = sm.lookup_char_pos(span_label.span.lo()); + let mut hi = sm.lookup_char_pos(span_label.span.hi()); // Watch out for "empty spans". 
If we get a span like 6..6, we // want to just display a `^` at 6, so convert that to @@ -724,10 +730,10 @@ impl EmitterWriter { fn get_multispan_max_line_num(&mut self, msp: &MultiSpan) -> usize { let mut max = 0; - if let Some(ref cm) = self.cm { + if let Some(ref sm) = self.sm { for primary_span in msp.primary_spans() { if !primary_span.is_dummy() { - let hi = cm.lookup_char_pos(primary_span.hi()); + let hi = sm.lookup_char_pos(primary_span.hi()); if hi.line > max { max = hi.line; } @@ -736,7 +742,7 @@ impl EmitterWriter { if !self.short_message { for span_label in msp.span_labels() { if !span_label.span.is_dummy() { - let hi = cm.lookup_char_pos(span_label.span.hi()); + let hi = sm.lookup_char_pos(span_label.span.hi()); if hi.line > max { max = hi.line; } @@ -768,7 +774,7 @@ impl EmitterWriter { always_backtrace: bool) -> bool { let mut spans_updated = false; - if let Some(ref cm) = self.cm { + if let Some(ref sm) = self.sm { let mut before_after: Vec<(Span, Span)> = vec![]; let mut new_labels: Vec<(Span, String)> = vec![]; @@ -777,7 +783,7 @@ impl EmitterWriter { if sp.is_dummy() { continue; } - let call_sp = cm.call_span_if_macro(*sp); + let call_sp = sm.call_span_if_macro(*sp); if call_sp != *sp && !always_backtrace { before_after.push((*sp, call_sp)); } @@ -802,7 +808,7 @@ impl EmitterWriter { }))); } // Check to make sure we're not in any <*macros> - if !cm.span_to_filename(def_site).is_macros() && + if !sm.span_to_filename(def_site).is_macros() && !trace.macro_decl_name.starts_with("desugaring of ") && !trace.macro_decl_name.starts_with("#[") || always_backtrace { @@ -829,7 +835,7 @@ impl EmitterWriter { if sp_label.span.is_dummy() { continue; } - if cm.span_to_filename(sp_label.span.clone()).is_macros() && + if sm.span_to_filename(sp_label.span.clone()).is_macros() && !always_backtrace { let v = sp_label.span.macro_backtrace(); @@ -1000,10 +1006,10 @@ impl EmitterWriter { let mut annotated_files = self.preprocess_annotations(msp); // Make sure our primary file comes first - let (primary_lo, cm) = if let (Some(cm), Some(ref primary_span)) = - (self.cm.as_ref(), msp.primary_span().as_ref()) { + let (primary_lo, sm) = if let (Some(sm), Some(ref primary_span)) = + (self.sm.as_ref(), msp.primary_span().as_ref()) { if !primary_span.is_dummy() { - (cm.lookup_char_pos(primary_span.lo()), cm) + (sm.lookup_char_pos(primary_span.lo()), sm) } else { emit_to_destination(&buffer.render(), level, &mut self.dst, self.short_message)?; return Ok(()); @@ -1021,7 +1027,7 @@ impl EmitterWriter { // Print out the annotate source lines that correspond with the error for annotated_file in annotated_files { // we can't annotate anything if the source is unavailable. 
- if !cm.ensure_source_file_source_present(annotated_file.file.clone()) { + if !sm.ensure_source_file_source_present(annotated_file.file.clone()) { continue; } @@ -1038,7 +1044,7 @@ impl EmitterWriter { buffer.append(buffer_msg_line_offset, &format!("{}:{}:{}", loc.file.name, - cm.doctest_offset_line(loc.line), + sm.doctest_offset_line(loc.line), loc.col.0 + 1), Style::LineAndColumn); for _ in 0..max_line_num_len { @@ -1048,7 +1054,7 @@ impl EmitterWriter { buffer.prepend(0, &format!("{}:{}:{}: ", loc.file.name, - cm.doctest_offset_line(loc.line), + sm.doctest_offset_line(loc.line), loc.col.0 + 1), Style::LineAndColumn); } @@ -1069,7 +1075,7 @@ impl EmitterWriter { }; format!("{}:{}{}", annotated_file.file.name, - cm.doctest_offset_line(first_line.line_index), + sm.doctest_offset_line(first_line.line_index), col) } else { annotated_file.file.name.to_string() @@ -1194,7 +1200,7 @@ impl EmitterWriter { level: &Level, max_line_num_len: usize) -> io::Result<()> { - if let Some(ref cm) = self.cm { + if let Some(ref sm) = self.sm { let mut buffer = StyledBuffer::new(); // Render the suggestion message @@ -1210,7 +1216,7 @@ impl EmitterWriter { Some(Style::HeaderMsg)); // Render the replacements for each suggestion - let suggestions = suggestion.splice_lines(&**cm); + let suggestions = suggestion.splice_lines(&**sm); let mut row_num = 2; for &(ref complete, ref parts) in suggestions.iter().take(MAX_SUGGESTIONS) { @@ -1221,11 +1227,11 @@ impl EmitterWriter { && parts[0].snippet.trim() == complete.trim()) && complete.lines().count() == 1; - let lines = cm.span_to_lines(parts[0].span).unwrap(); + let lines = sm.span_to_lines(parts[0].span).unwrap(); assert!(!lines.lines.is_empty()); - let line_start = cm.lookup_char_pos(parts[0].span.lo()).line; + let line_start = sm.lookup_char_pos(parts[0].span.lo()).line; draw_col_separator_no_space(&mut buffer, 1, max_line_num_len + 1); let mut line_pos = 0; let mut lines = complete.lines(); @@ -1250,8 +1256,8 @@ impl EmitterWriter { if show_underline { draw_col_separator(&mut buffer, row_num, max_line_num_len + 1); for part in parts { - let span_start_pos = cm.lookup_char_pos(part.span.lo()).col_display; - let span_end_pos = cm.lookup_char_pos(part.span.hi()).col_display; + let span_start_pos = sm.lookup_char_pos(part.span.lo()).col_display; + let span_end_pos = sm.lookup_char_pos(part.span.hi()).col_display; // Do not underline the leading... 
let start = part.snippet.len() diff --git a/src/librustc_errors/lib.rs b/src/librustc_errors/lib.rs index 9bd95e8262..0fb77a7a3a 100644 --- a/src/librustc_errors/lib.rs +++ b/src/librustc_errors/lib.rs @@ -129,7 +129,7 @@ pub trait SourceMapper { fn span_to_filename(&self, sp: Span) -> FileName; fn merge_spans(&self, sp_lhs: Span, sp_rhs: Span) -> Option; fn call_span_if_macro(&self, sp: Span) -> Span; - fn ensure_source_file_source_present(&self, file_map: Lrc) -> bool; + fn ensure_source_file_source_present(&self, source_file: Lrc) -> bool; fn doctest_offset_line(&self, line: usize) -> usize; } diff --git a/src/librustc_fs_util/lib.rs b/src/librustc_fs_util/lib.rs index ffe420b109..1b0ff4f861 100644 --- a/src/librustc_fs_util/lib.rs +++ b/src/librustc_fs_util/lib.rs @@ -116,13 +116,13 @@ pub fn rename_or_copy_remove, Q: AsRef>(p: P, } #[cfg(unix)] -pub fn path2cstr(p: &Path) -> CString { - use std::os::unix::prelude::*; +pub fn path_to_c_string(p: &Path) -> CString { + use std::os::unix::ffi::OsStrExt; use std::ffi::OsStr; let p: &OsStr = p.as_ref(); CString::new(p.as_bytes()).unwrap() } #[cfg(windows)] -pub fn path2cstr(p: &Path) -> CString { +pub fn path_to_c_string(p: &Path) -> CString { CString::new(p.to_str().unwrap()).unwrap() } diff --git a/src/librustc_incremental/persist/dirty_clean.rs b/src/librustc_incremental/persist/dirty_clean.rs index 58a799bb45..f76086139e 100644 --- a/src/librustc_incremental/persist/dirty_clean.rs +++ b/src/librustc_incremental/persist/dirty_clean.rs @@ -160,7 +160,7 @@ const LABELS_FN_IN_TRAIT: &[&[&str]] = &[ EXTRA_TRAIT, ]; -/// For generic cases like inline-assemply/mod/etc +/// For generic cases like inline-assembly/mod/etc const LABELS_HIR_ONLY: &[&[&str]] = &[ BASE_HIR, ]; diff --git a/src/librustc_incremental/persist/fs.rs b/src/librustc_incremental/persist/fs.rs index dee50f5ab2..75cdefaf49 100644 --- a/src/librustc_incremental/persist/fs.rs +++ b/src/librustc_incremental/persist/fs.rs @@ -490,7 +490,7 @@ fn create_dir(sess: &Session, path: &Path, dir_tag: &str) -> Result<(),()> { } } -/// Allocate a the lock-file and lock it. +/// Allocate the lock-file and lock it. fn lock_directory(sess: &Session, session_dir: &Path) -> Result<(flock::Lock, PathBuf), ()> { @@ -659,7 +659,7 @@ pub fn garbage_collect_session_directories(sess: &Session) -> io::Result<()> { let mut session_directories = FxHashSet::default(); let mut lock_files = FxHashSet::default(); - for dir_entry in try!(crate_directory.read_dir()) { + for dir_entry in crate_directory.read_dir()? { let dir_entry = match dir_entry { Ok(dir_entry) => dir_entry, _ => { @@ -887,7 +887,7 @@ fn all_except_most_recent(deletion_candidates: Vec<(SystemTime, PathBuf, Option< /// into the '\\?\' format, which supports much longer paths. 
fn safe_remove_dir_all(p: &Path) -> io::Result<()> { if p.exists() { - let canonicalized = try!(p.canonicalize()); + let canonicalized = p.canonicalize()?; std_fs::remove_dir_all(canonicalized) } else { Ok(()) @@ -896,7 +896,7 @@ fn safe_remove_dir_all(p: &Path) -> io::Result<()> { fn safe_remove_file(p: &Path) -> io::Result<()> { if p.exists() { - let canonicalized = try!(p.canonicalize()); + let canonicalized = p.canonicalize()?; std_fs::remove_file(canonicalized) } else { Ok(()) diff --git a/src/librustc_lint/Cargo.toml b/src/librustc_lint/Cargo.toml index f097095abe..fe197e3e2e 100644 --- a/src/librustc_lint/Cargo.toml +++ b/src/librustc_lint/Cargo.toml @@ -16,3 +16,4 @@ rustc_mir = { path = "../librustc_mir"} rustc_target = { path = "../librustc_target" } syntax = { path = "../libsyntax" } syntax_pos = { path = "../libsyntax_pos" } +rustc_data_structures = { path = "../librustc_data_structures" } diff --git a/src/librustc_lint/builtin.rs b/src/librustc_lint/builtin.rs index 7291e27048..7dd1ca3493 100644 --- a/src/librustc_lint/builtin.rs +++ b/src/librustc_lint/builtin.rs @@ -603,7 +603,7 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for MissingDebugImplementations { }; if self.impling_types.is_none() { - let mut impls = NodeSet(); + let mut impls = NodeSet::default(); cx.tcx.for_each_impl(debug, |d| { if let Some(ty_def) = cx.tcx.type_of(d).ty_adt_def() { if let Some(node_id) = cx.tcx.hir.as_local_node_id(ty_def.did) { @@ -1274,50 +1274,7 @@ impl LintPass for UnusedBrokenConst { lint_array!() } } - -fn validate_const<'a, 'tcx>( - tcx: ty::TyCtxt<'a, 'tcx, 'tcx>, - constant: &ty::Const<'tcx>, - param_env: ty::ParamEnv<'tcx>, - gid: ::rustc::mir::interpret::GlobalId<'tcx>, - what: &str, -) { - let ecx = ::rustc_mir::const_eval::mk_eval_cx(tcx, gid.instance, param_env).unwrap(); - let result = (|| { - let op = ecx.const_to_op(constant)?; - let mut ref_tracking = ::rustc_mir::interpret::RefTracking::new(op); - while let Some((op, mut path)) = ref_tracking.todo.pop() { - ecx.validate_operand( - op, - &mut path, - Some(&mut ref_tracking), - /* const_mode */ true, - )?; - } - Ok(()) - })(); - if let Err(err) = result { - let (trace, span) = ecx.generate_stacktrace(None); - let err = ::rustc::mir::interpret::ConstEvalErr { - error: err, - stacktrace: trace, - span, - }; - let err = err.struct_error( - tcx.at(span), - &format!("this {} likely exhibits undefined behavior", what), - ); - if let Some(mut err) = err { - err.note("The rules on what exactly is undefined behavior aren't clear, \ - so this check might be overzealous. 
Please open an issue on the rust compiler \ - repository if you believe it should not be considered undefined behavior", - ); - err.emit(); - } - } -} - -fn check_const(cx: &LateContext, body_id: hir::BodyId, what: &str) { +fn check_const(cx: &LateContext, body_id: hir::BodyId) { let def_id = cx.tcx.hir.body_owner_def_id(body_id); let is_static = cx.tcx.is_static(def_id).is_some(); let param_env = if is_static { @@ -1330,46 +1287,19 @@ fn check_const(cx: &LateContext, body_id: hir::BodyId, what: &str) { instance: ty::Instance::mono(cx.tcx, def_id), promoted: None }; - match cx.tcx.const_eval(param_env.and(cid)) { - Ok(val) => validate_const(cx.tcx, val, param_env, cid, what), - Err(err) => { - // errors for statics are already reported directly in the query, avoid duplicates - if !is_static { - let span = cx.tcx.def_span(def_id); - err.report_as_lint( - cx.tcx.at(span), - &format!("this {} cannot be used", what), - cx.current_lint_root(), - ); - } - }, - } -} - -struct UnusedBrokenConstVisitor<'a, 'tcx: 'a>(&'a LateContext<'a, 'tcx>); - -impl<'a, 'tcx, 'v> hir::intravisit::Visitor<'v> for UnusedBrokenConstVisitor<'a, 'tcx> { - fn visit_nested_body(&mut self, id: hir::BodyId) { - check_const(self.0, id, "array length"); - } - fn nested_visit_map<'this>(&'this mut self) -> hir::intravisit::NestedVisitorMap<'this, 'v> { - hir::intravisit::NestedVisitorMap::None - } + // trigger the query once for all constants since that will already report the errors + let _ = cx.tcx.const_eval(param_env.and(cid)); } impl<'a, 'tcx> LateLintPass<'a, 'tcx> for UnusedBrokenConst { fn check_item(&mut self, cx: &LateContext, it: &hir::Item) { match it.node { hir::ItemKind::Const(_, body_id) => { - check_const(cx, body_id, "constant"); + check_const(cx, body_id); }, hir::ItemKind::Static(_, _, body_id) => { - check_const(cx, body_id, "static"); + check_const(cx, body_id); }, - hir::ItemKind::Ty(ref ty, _) => hir::intravisit::walk_ty( - &mut UnusedBrokenConstVisitor(cx), - ty - ), _ => {}, } } @@ -1586,7 +1516,7 @@ declare_lint! 
{ "detects edition keywords being used as an identifier" } -/// Checks for uses of edtion keywords used as an identifier +/// Checks for uses of edition keywords used as an identifier #[derive(Clone)] pub struct KeywordIdents; diff --git a/src/librustc_lint/lib.rs b/src/librustc_lint/lib.rs index f289acc032..71efc5654e 100644 --- a/src/librustc_lint/lib.rs +++ b/src/librustc_lint/lib.rs @@ -29,7 +29,6 @@ #![feature(nll)] #![feature(quote)] #![feature(rustc_diagnostic_macros)] -#![feature(macro_at_most_once_rep)] #[macro_use] extern crate syntax; @@ -40,6 +39,7 @@ extern crate log; extern crate rustc_mir; extern crate rustc_target; extern crate syntax_pos; +extern crate rustc_data_structures; use rustc::lint; use rustc::lint::{LateContext, LateLintPass, LintPass, LintArray}; @@ -317,11 +317,6 @@ pub fn register_builtins(store: &mut lint::LintStore, sess: Option<&Session>) { reference: "issue #51443 ", edition: None, }, - FutureIncompatibleInfo { - id: LintId::of(DUPLICATE_ASSOCIATED_TYPE_BINDINGS), - reference: "issue #50589 ", - edition: None, - }, FutureIncompatibleInfo { id: LintId::of(PROC_MACRO_DERIVE_RESOLUTION_FALLBACK), reference: "issue #50504 ", diff --git a/src/librustc_lint/nonstandard_style.rs b/src/librustc_lint/nonstandard_style.rs index 56d204f15d..40781b0771 100644 --- a/src/librustc_lint/nonstandard_style.rs +++ b/src/librustc_lint/nonstandard_style.rs @@ -121,7 +121,7 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for NonCamelCaseTypes { let has_repr_c = it.attrs .iter() .any(|attr| { - attr::find_repr_attrs(cx.tcx.sess.diagnostic(), attr) + attr::find_repr_attrs(&cx.tcx.sess.parse_sess, attr) .iter() .any(|r| r == &attr::ReprC) }); diff --git a/src/librustc_lint/types.rs b/src/librustc_lint/types.rs index 5197876f92..82ace620c8 100644 --- a/src/librustc_lint/types.rs +++ b/src/librustc_lint/types.rs @@ -13,7 +13,8 @@ use rustc::hir::Node; use rustc::ty::subst::Substs; use rustc::ty::{self, AdtKind, ParamEnv, Ty, TyCtxt}; -use rustc::ty::layout::{self, IntegerExt, LayoutOf}; +use rustc::ty::layout::{self, IntegerExt, LayoutOf, VariantIdx}; +use rustc_data_structures::indexed_vec::Idx; use util::nodemap::FxHashSet; use lint::{LateContext, LintContext, LintArray}; use lint::{LintPass, LateLintPass}; @@ -377,13 +378,13 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for TypeLimits { let (t, actually) = match ty { ty::Int(t) => { let ity = attr::IntType::SignedInt(t); - let bits = layout::Integer::from_attr(cx.tcx, ity).size().bits(); + let bits = layout::Integer::from_attr(&cx.tcx, ity).size().bits(); let actually = (val << (128 - bits)) as i128 >> (128 - bits); (format!("{:?}", t), actually.to_string()) } ty::Uint(t) => { let ity = attr::IntType::UnsignedInt(t); - let bits = layout::Integer::from_attr(cx.tcx, ity).size().bits(); + let bits = layout::Integer::from_attr(&cx.tcx, ity).size().bits(); let actually = (val << (128 - bits)) >> (128 - bits); (format!("{:?}", t), actually.to_string()) } @@ -452,10 +453,13 @@ fn is_repr_nullable_ptr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, if def.variants.len() == 2 { let data_idx; - if def.variants[0].fields.is_empty() { - data_idx = 1; - } else if def.variants[1].fields.is_empty() { - data_idx = 0; + let zero = VariantIdx::new(0); + let one = VariantIdx::new(1); + + if def.variants[zero].fields.is_empty() { + data_idx = one; + } else if def.variants[one].fields.is_empty() { + data_idx = zero; } else { return false; } @@ -718,10 +722,12 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> { ty::Param(..) | ty::Infer(..) | + ty::Bound(..) 
| ty::Error | ty::Closure(..) | ty::Generator(..) | ty::GeneratorWitness(..) | + ty::Placeholder(..) | ty::UnnormalizedProjection(..) | ty::Projection(..) | ty::Opaque(..) | @@ -794,7 +800,7 @@ impl LintPass for ImproperCTypes { impl<'a, 'tcx> LateLintPass<'a, 'tcx> for ImproperCTypes { fn check_foreign_item(&mut self, cx: &LateContext, it: &hir::ForeignItem) { - let mut vis = ImproperCTypesVisitor { cx: cx }; + let mut vis = ImproperCTypesVisitor { cx }; let abi = cx.tcx.hir.get_foreign_abi(it.id); if abi != Abi::RustIntrinsic && abi != Abi::PlatformIntrinsic { match it.node { @@ -828,7 +834,7 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for VariantSizeDifferences { Ok(layout) => { let variants = &layout.variants; if let layout::Variants::Tagged { ref variants, ref tag, .. } = variants { - let discr_size = tag.value.size(cx.tcx).bytes(); + let discr_size = tag.value.size(&cx.tcx).bytes(); debug!("enum `{}` is {} bytes large with layout:\n{:#?}", t, layout.size.bytes(), layout); diff --git a/src/librustc_lint/unused.rs b/src/librustc_lint/unused.rs index 049bcc3010..fab618d9c8 100644 --- a/src/librustc_lint/unused.rs +++ b/src/librustc_lint/unused.rs @@ -60,18 +60,39 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for UnusedResults { } let t = cx.tables.expr_ty(&expr); - // FIXME(varkor): replace with `t.is_unit() || t.conservative_is_uninhabited()`. - let type_permits_no_use = match t.sty { - ty::Tuple(ref tys) if tys.is_empty() => true, - ty::Never => true, - ty::Adt(def, _) => { - if def.variants.is_empty() { - true - } else { - check_must_use(cx, def.did, s.span, "") + let type_permits_lack_of_use = if t.is_unit() + || cx.tcx.is_ty_uninhabited_from(cx.tcx.hir.get_module_parent(expr.id), t) { + true + } else { + match t.sty { + ty::Adt(def, _) => check_must_use(cx, def.did, s.span, "", ""), + ty::Opaque(def, _) => { + let mut must_use = false; + for (predicate, _) in &cx.tcx.predicates_of(def).predicates { + if let ty::Predicate::Trait(ref poly_trait_predicate) = predicate { + let trait_ref = poly_trait_predicate.skip_binder().trait_ref; + if check_must_use(cx, trait_ref.def_id, s.span, "implementer of ", "") { + must_use = true; + break; + } + } + } + must_use } + ty::Dynamic(binder, _) => { + let mut must_use = false; + for predicate in binder.skip_binder().iter() { + if let ty::ExistentialPredicate::Trait(ref trait_ref) = predicate { + if check_must_use(cx, trait_ref.def_id, s.span, "", " trait object") { + must_use = true; + break; + } + } + } + must_use + } + _ => false, } - _ => false, }; let mut fn_warned = false; @@ -98,8 +119,8 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for UnusedResults { }; if let Some(def) = maybe_def { let def_id = def.def_id(); - fn_warned = check_must_use(cx, def_id, s.span, "return value of "); - } else if type_permits_no_use { + fn_warned = check_must_use(cx, def_id, s.span, "return value of ", ""); + } else if type_permits_lack_of_use { // We don't warn about unused unit or uninhabited types. // (See https://github.com/rust-lang/rust/issues/43806 for details.) 
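For context on the `unused` lint hunk above: `check_must_use` now takes `descr_pre_path`/`descr_post_path` strings, and `#[must_use]` placed on a trait is now honored when the trait is returned as `impl Trait` or used as a trait object. A small illustrative example with hypothetical names, showing roughly what the extended lint reports:

#[must_use]
trait Critical {}

struct Token;
impl Critical for Token {}

fn make() -> impl Critical { Token }

fn main() {
    // With this change the call below warns along the lines of
    // "unused implementer of `Critical` that must be used".
    make();
}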
return; @@ -148,15 +169,21 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for UnusedResults { op_warned = true; } - if !(type_permits_no_use || fn_warned || op_warned) { + if !(type_permits_lack_of_use || fn_warned || op_warned) { cx.span_lint(UNUSED_RESULTS, s.span, "unused result"); } - fn check_must_use(cx: &LateContext, def_id: DefId, sp: Span, describe_path: &str) -> bool { + fn check_must_use( + cx: &LateContext, + def_id: DefId, + sp: Span, + descr_pre_path: &str, + descr_post_path: &str, + ) -> bool { for attr in cx.tcx.get_attrs(def_id).iter() { if attr.check_name("must_use") { - let msg = format!("unused {}`{}` that must be used", - describe_path, cx.tcx.item_path_str(def_id)); + let msg = format!("unused {}`{}`{} that must be used", + descr_pre_path, cx.tcx.item_path_str(def_id), descr_post_path); let mut err = cx.struct_span_lint(UNUSED_MUST_USE, sp, &msg); // check for #[must_use = "..."] if let Some(note) = attr.value_str() { @@ -279,10 +306,9 @@ impl UnusedParens { msg: &str, followed_by_block: bool) { if let ast::ExprKind::Paren(ref inner) = value.node { - let necessary = followed_by_block && if let ast::ExprKind::Ret(_) = inner.node { - true - } else { - parser::contains_exterior_struct_lit(&inner) + let necessary = followed_by_block && match inner.node { + ast::ExprKind::Ret(_) | ast::ExprKind::Break(..) => true, + _ => parser::contains_exterior_struct_lit(&inner), }; if !necessary { let expr_text = if let Ok(snippet) = cx.sess().source_map() diff --git a/src/librustc_lsan/Cargo.toml b/src/librustc_lsan/Cargo.toml index 9c19b53742..2573825a5f 100644 --- a/src/librustc_lsan/Cargo.toml +++ b/src/librustc_lsan/Cargo.toml @@ -15,6 +15,5 @@ cmake = "0.1.18" [dependencies] alloc = { path = "../liballoc" } -alloc_system = { path = "../liballoc_system" } core = { path = "../libcore" } compiler_builtins = { path = "../rustc/compiler_builtins_shim" } diff --git a/src/librustc_lsan/lib.rs b/src/librustc_lsan/lib.rs index 7b845e631f..47f917e40c 100644 --- a/src/librustc_lsan/lib.rs +++ b/src/librustc_lsan/lib.rs @@ -9,7 +9,6 @@ // except according to those terms. 
#![sanitizer_runtime] -#![feature(alloc_system)] #![feature(nll)] #![feature(sanitizer_runtime)] #![feature(staged_api)] @@ -17,10 +16,3 @@ #![unstable(feature = "sanitizer_runtime_lib", reason = "internal implementation detail of sanitizers", issue = "0")] - -extern crate alloc_system; - -use alloc_system::System; - -#[global_allocator] -static ALLOC: System = System; diff --git a/src/librustc_metadata/Cargo.toml b/src/librustc_metadata/Cargo.toml index 6142fe7814..337c87c24b 100644 --- a/src/librustc_metadata/Cargo.toml +++ b/src/librustc_metadata/Cargo.toml @@ -11,13 +11,13 @@ crate-type = ["dylib"] [dependencies] flate2 = "1.0" log = "0.4" -proc_macro = { path = "../libproc_macro" } +memmap = "0.6" rustc = { path = "../librustc" } -rustc_target = { path = "../librustc_target" } rustc_data_structures = { path = "../librustc_data_structures" } rustc_errors = { path = "../librustc_errors" } +rustc_target = { path = "../librustc_target" } serialize = { path = "../libserialize" } +stable_deref_trait = "1.0.0" syntax = { path = "../libsyntax" } syntax_ext = { path = "../libsyntax_ext" } syntax_pos = { path = "../libsyntax_pos" } -rustc_metadata_utils = { path = "../librustc_metadata_utils" } diff --git a/src/librustc_metadata/creader.rs b/src/librustc_metadata/creader.rs index 4b96735eb7..4ff29f5c04 100644 --- a/src/librustc_metadata/creader.rs +++ b/src/librustc_metadata/creader.rs @@ -16,7 +16,7 @@ use decoder::proc_macro_def_path_table; use schema::CrateRoot; use rustc_data_structures::sync::{Lrc, RwLock, Lock}; -use rustc::hir::def_id::{CrateNum, CRATE_DEF_INDEX}; +use rustc::hir::def_id::CrateNum; use rustc_data_structures::svh::Svh; use rustc::middle::allocator::AllocatorKind; use rustc::middle::cstore::DepKind; @@ -30,15 +30,12 @@ use rustc::util::common::record_time; use rustc::util::nodemap::FxHashSet; use rustc::hir::map::Definitions; -use rustc_metadata_utils::validate_crate_name; - use std::ops::Deref; use std::path::PathBuf; use std::{cmp, fs}; use syntax::ast; use syntax::attr; -use syntax::edition::Edition; use syntax::ext::base::SyntaxExtension; use syntax::symbol::Symbol; use syntax::visit; @@ -233,7 +230,7 @@ impl<'a> CrateLoader<'a> { let dependencies: Vec = cnum_map.iter().cloned().collect(); - let proc_macros = crate_root.macro_derive_registrar.map(|_| { + let proc_macros = crate_root.proc_macro_decls_static.map(|_| { self.load_derive_macros(&crate_root, dylib.clone().map(|p| p.0), span) }); @@ -341,7 +338,7 @@ impl<'a> CrateLoader<'a> { match result { LoadResult::Previous(cnum) => { let data = self.cstore.get_crate_data(cnum); - if data.root.macro_derive_registrar.is_some() { + if data.root.proc_macro_decls_static.is_some() { dep_kind = DepKind::UnexportedMacrosOnly; } data.dep_kind.with_lock(|data_dep_kind| { @@ -433,7 +430,7 @@ impl<'a> CrateLoader<'a> { dep_kind: DepKind) -> cstore::CrateNumMap { debug!("resolving deps of external crate"); - if crate_root.macro_derive_registrar.is_some() { + if crate_root.proc_macro_decls_static.is_some() { return cstore::CrateNumMap::new(); } @@ -535,9 +532,8 @@ impl<'a> CrateLoader<'a> { fn load_derive_macros(&mut self, root: &CrateRoot, dylib: Option, span: Span) -> Vec<(ast::Name, Lrc)> { use std::{env, mem}; - use proc_macro::TokenStream; - use proc_macro::__internal::Registry; use dynamic_lib::DynamicLibrary; + use proc_macro::bridge::client::ProcMacro; use syntax_ext::deriving::custom::ProcMacroDerive; use syntax_ext::proc_macro_impl::{AttrProcMacro, BangProcMacro}; @@ -552,61 +548,49 @@ impl<'a> CrateLoader<'a> { Err(err) => 
self.sess.span_fatal(span, &err), }; - let sym = self.sess.generate_derive_registrar_symbol(root.disambiguator); - let registrar = unsafe { + let sym = self.sess.generate_proc_macro_decls_symbol(root.disambiguator); + let decls = unsafe { let sym = match lib.symbol(&sym) { Ok(f) => f, Err(err) => self.sess.span_fatal(span, &err), }; - mem::transmute::<*mut u8, fn(&mut dyn Registry)>(sym) + *(sym as *const &[ProcMacro]) }; - struct MyRegistrar { - extensions: Vec<(ast::Name, Lrc)>, - edition: Edition, - } - - impl Registry for MyRegistrar { - fn register_custom_derive(&mut self, - trait_name: &str, - expand: fn(TokenStream) -> TokenStream, - attributes: &[&'static str]) { - let attrs = attributes.iter().cloned().map(Symbol::intern).collect::>(); - let derive = ProcMacroDerive::new(expand, attrs.clone()); - let derive = SyntaxExtension::ProcMacroDerive( - Box::new(derive), attrs, self.edition - ); - self.extensions.push((Symbol::intern(trait_name), Lrc::new(derive))); + let extensions = decls.iter().map(|&decl| { + match decl { + ProcMacro::CustomDerive { trait_name, attributes, client } => { + let attrs = attributes.iter().cloned().map(Symbol::intern).collect::>(); + (trait_name, SyntaxExtension::ProcMacroDerive( + Box::new(ProcMacroDerive { + client, + attrs: attrs.clone(), + }), + attrs, + root.edition, + )) + } + ProcMacro::Attr { name, client } => { + (name, SyntaxExtension::AttrProcMacro( + Box::new(AttrProcMacro { client }), + root.edition, + )) + } + ProcMacro::Bang { name, client } => { + (name, SyntaxExtension::ProcMacro { + expander: Box::new(BangProcMacro { client }), + allow_internal_unstable: false, + edition: root.edition, + }) + } } - - fn register_attr_proc_macro(&mut self, - name: &str, - expand: fn(TokenStream, TokenStream) -> TokenStream) { - let expand = SyntaxExtension::AttrProcMacro( - Box::new(AttrProcMacro { inner: expand }), self.edition - ); - self.extensions.push((Symbol::intern(name), Lrc::new(expand))); - } - - fn register_bang_proc_macro(&mut self, - name: &str, - expand: fn(TokenStream) -> TokenStream) { - let expand = SyntaxExtension::ProcMacro { - expander: Box::new(BangProcMacro { inner: expand }), - allow_internal_unstable: false, - edition: self.edition, - }; - self.extensions.push((Symbol::intern(name), Lrc::new(expand))); - } - } - - let mut my_registrar = MyRegistrar { extensions: Vec::new(), edition: root.edition }; - registrar(&mut my_registrar); + }).map(|(name, ext)| (Symbol::intern(name), Lrc::new(ext))).collect(); // Intentionally leak the dynamic library. We can't ever unload it // since the library can make things that will live arbitrarily long. mem::forget(lib); - my_registrar.extensions + + extensions } /// Look for a plugin registrar. Returns library path, crate @@ -866,7 +850,6 @@ impl<'a> CrateLoader<'a> { needs_allocator = needs_allocator || data.root.needs_allocator; }); if !needs_allocator { - self.sess.injected_allocator.set(None); self.sess.allocator_kind.set(None); return } @@ -874,20 +857,15 @@ impl<'a> CrateLoader<'a> { // At this point we've determined that we need an allocator. Let's see // if our compilation session actually needs an allocator based on what // we're emitting. 
- let mut need_lib_alloc = false; - let mut need_exe_alloc = false; - for ct in self.sess.crate_types.borrow().iter() { - match *ct { - config::CrateType::Executable => need_exe_alloc = true, - config::CrateType::Dylib | - config::CrateType::ProcMacro | - config::CrateType::Cdylib | - config::CrateType::Staticlib => need_lib_alloc = true, - config::CrateType::Rlib => {} - } - } - if !need_lib_alloc && !need_exe_alloc { - self.sess.injected_allocator.set(None); + let all_rlib = self.sess.crate_types.borrow() + .iter() + .all(|ct| { + match *ct { + config::CrateType::Rlib => true, + _ => false, + } + }); + if all_rlib { self.sess.allocator_kind.set(None); return } @@ -926,103 +904,27 @@ impl<'a> CrateLoader<'a> { }); if global_allocator.is_some() { self.sess.allocator_kind.set(Some(AllocatorKind::Global)); - self.sess.injected_allocator.set(None); return } // Ok we haven't found a global allocator but we still need an - // allocator. At this point we'll either fall back to the "library - // allocator" or the "exe allocator" depending on a few variables. Let's - // figure out which one. - // - // Note that here we favor linking to the "library allocator" as much as - // possible. If we're not creating rustc's version of libstd - // (need_lib_alloc and prefer_dynamic) then we select `None`, and if the - // exe allocation crate doesn't exist for this target then we also - // select `None`. - let exe_allocation_crate_data = - if need_lib_alloc && !self.sess.opts.cg.prefer_dynamic { - None - } else { - self.sess - .target - .target - .options - .exe_allocation_crate - .as_ref() - .map(|name| { - // We've determined that we're injecting an "exe allocator" which means - // that we're going to load up a whole new crate. An example of this is - // that we're producing a normal binary on Linux which means we need to - // load the `alloc_jemalloc` crate to link as an allocator. - let name = Symbol::intern(name); - let (cnum, data) = self.resolve_crate(&None, - name, - name, - None, - None, - DUMMY_SP, - PathKind::Crate, - DepKind::Implicit) - .unwrap_or_else(|err| err.report()); - self.sess.injected_allocator.set(Some(cnum)); - data - }) - }; - - let allocation_crate_data = exe_allocation_crate_data.or_else(|| { - // No allocator was injected - self.sess.injected_allocator.set(None); - - if attr::contains_name(&krate.attrs, "default_lib_allocator") { - // Prefer self as the allocator if there's a collision - return None; + // allocator. At this point our allocator request is typically fulfilled + // by the standard library, denoted by the `#![default_lib_allocator]` + // attribute. + let mut has_default = attr::contains_name(&krate.attrs, "default_lib_allocator"); + self.cstore.iter_crate_data(|_, data| { + if data.root.has_default_lib_allocator { + has_default = true; } - // We're not actually going to inject an allocator, we're going to - // require that something in our crate graph is the default lib - // allocator. This is typically libstd, so this'll rarely be an - // error. - let mut allocator = None; - self.cstore.iter_crate_data(|_, data| { - if allocator.is_none() && data.root.has_default_lib_allocator { - allocator = Some(data.clone()); - } - }); - allocator }); - match allocation_crate_data { - Some(data) => { - // We have an allocator. We detect separately what kind it is, to allow for some - // flexibility in misconfiguration. 
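For context on the creader.rs hunk above: the compiler no longer injects an `alloc_jemalloc`/`alloc_system` "exe allocator"; a crate graph now needs either a crate tagged `#![default_lib_allocator]` (normally libstd) or an explicit `#[global_allocator]`, as the new error message says. A minimal sketch of the explicit route, using the standard `System` allocator:

use std::alloc::System;

// Opt in explicitly rather than relying on an injected allocator crate.
#[global_allocator]
static GLOBAL: System = System;

fn main() {
    let v = vec![1, 2, 3]; // allocated through `System`
    assert_eq!(v.len(), 3);
}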
- let attrs = data.get_item_attrs(CRATE_DEF_INDEX, self.sess); - let kind_interned = attr::first_attr_value_str_by_name(&attrs, "rustc_alloc_kind") - .map(Symbol::as_str); - let kind_str = kind_interned - .as_ref() - .map(|s| s as &str); - let alloc_kind = match kind_str { - None | - Some("lib") => AllocatorKind::DefaultLib, - Some("exe") => AllocatorKind::DefaultExe, - Some(other) => { - self.sess.err(&format!("Allocator kind {} not known", other)); - return; - } - }; - self.sess.allocator_kind.set(Some(alloc_kind)); - }, - None => { - if !attr::contains_name(&krate.attrs, "default_lib_allocator") { - self.sess.err("no global memory allocator found but one is \ - required; link to std or \ - add #[global_allocator] to a static item \ - that implements the GlobalAlloc trait."); - return; - } - self.sess.allocator_kind.set(Some(AllocatorKind::DefaultLib)); - } + if !has_default { + self.sess.err("no global memory allocator found but one is \ + required; link to std or \ + add #[global_allocator] to a static item \ + that implements the GlobalAlloc trait."); } + self.sess.allocator_kind.set(Some(AllocatorKind::DefaultLib)); fn has_global_allocator(krate: &ast::Crate) -> bool { struct Finder(bool); @@ -1085,8 +987,6 @@ impl<'a> CrateLoader<'a> { impl<'a> CrateLoader<'a> { pub fn postprocess(&mut self, krate: &ast::Crate) { - // inject the sanitizer runtime before the allocator runtime because all - // sanitizers force the use of the `alloc_system` allocator self.inject_sanitizer_runtime(); self.inject_profiler_runtime(); self.inject_allocator_crate(krate); @@ -1106,7 +1006,7 @@ impl<'a> CrateLoader<'a> { item.ident, orig_name); let orig_name = match orig_name { Some(orig_name) => { - validate_crate_name(Some(self.sess), &orig_name.as_str(), + ::validate_crate_name(Some(self.sess), &orig_name.as_str(), Some(item.span)); orig_name } diff --git a/src/librustc_metadata/cstore_impl.rs b/src/librustc_metadata/cstore_impl.rs index e6e1367b59..d0fa63a616 100644 --- a/src/librustc_metadata/cstore_impl.rs +++ b/src/librustc_metadata/cstore_impl.rs @@ -103,9 +103,9 @@ provide! { <'tcx> tcx, def_id, other, cdata, generics_of => { tcx.alloc_generics(cdata.get_generics(def_id.index, tcx.sess)) } - predicates_of => { cdata.get_predicates(def_id.index, tcx) } - predicates_defined_on => { cdata.get_predicates_defined_on(def_id.index, tcx) } - super_predicates_of => { cdata.get_super_predicates(def_id.index, tcx) } + predicates_of => { Lrc::new(cdata.get_predicates(def_id.index, tcx)) } + predicates_defined_on => { Lrc::new(cdata.get_predicates_defined_on(def_id.index, tcx)) } + super_predicates_of => { Lrc::new(cdata.get_super_predicates(def_id.index, tcx)) } trait_def => { tcx.alloc_trait_def(cdata.get_trait_def(def_id.index, tcx.sess)) } @@ -203,8 +203,8 @@ provide! { <'tcx> tcx, def_id, other, cdata, DefId { krate: def_id.krate, index } }) } - derive_registrar_fn => { - cdata.root.macro_derive_registrar.map(|index| { + proc_macro_decls_static => { + cdata.root.proc_macro_decls_static.map(|index| { DefId { krate: def_id.krate, index } }) } @@ -316,7 +316,7 @@ pub fn provide<'tcx>(providers: &mut Providers<'tcx>) { use std::collections::hash_map::Entry; assert_eq!(cnum, LOCAL_CRATE); - let mut visible_parent_map: DefIdMap = DefIdMap(); + let mut visible_parent_map: DefIdMap = Default::default(); // Issue 46112: We want the map to prefer the shortest // paths when reporting the path to an item. 
Therefore we @@ -431,8 +431,9 @@ impl cstore::CStore { use syntax::ext::base::SyntaxExtension; use syntax_ext::proc_macro_impl::BangProcMacro; + let client = ::proc_macro::bridge::client::Client::expand1(::proc_macro::quote); let ext = SyntaxExtension::ProcMacro { - expander: Box::new(BangProcMacro { inner: ::proc_macro::quote }), + expander: Box::new(BangProcMacro { client }), allow_internal_unstable: true, edition: data.root.edition, }; diff --git a/src/librustc_metadata/decoder.rs b/src/librustc_metadata/decoder.rs index 9864c1f3d7..1c7e3c95d1 100644 --- a/src/librustc_metadata/decoder.rs +++ b/src/librustc_metadata/decoder.rs @@ -400,7 +400,7 @@ impl<'a, 'tcx> MetadataBlob { for (i, dep) in root.crate_deps .decode(self) .enumerate() { - write!(out, "{} {}-{}\n", i + 1, dep.name, dep.hash)?; + write!(out, "{} {}{}\n", i + 1, dep.name, dep.extra_filename)?; } write!(out, "\n")?; Ok(()) @@ -601,7 +601,7 @@ impl<'a, 'tcx> CrateMetadata { }) .collect() } else { - vec![self.get_variant(tcx, &item, item_id, kind)] + std::iter::once(self.get_variant(tcx, &item, item_id, kind)).collect() }; tcx.alloc_adt_def(did, kind, variants, repr) diff --git a/src/librustc_metadata/encoder.rs b/src/librustc_metadata/encoder.rs index 26f977c6b5..2736c60ffb 100644 --- a/src/librustc_metadata/encoder.rs +++ b/src/librustc_metadata/encoder.rs @@ -27,6 +27,7 @@ use rustc::mir::{self, interpret}; use rustc::traits::specialization_graph; use rustc::ty::{self, Ty, TyCtxt, ReprOptions, SymbolName}; use rustc::ty::codec::{self as ty_codec, TyEncoder}; +use rustc::ty::layout::VariantIdx; use rustc::session::config::{self, CrateType}; use rustc::util::nodemap::FxHashMap; @@ -323,7 +324,7 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> { index.record(DefId::local(CRATE_DEF_INDEX), IsolatedEncoder::encode_info_for_mod, FromId(CRATE_NODE_ID, (&krate.module, &krate.attrs, &vis))); - let mut visitor = EncodeVisitor { index: index }; + let mut visitor = EncodeVisitor { index }; krate.visit_all_item_likes(&mut visitor.as_deep_visitor()); for macro_def in &krate.exported_macros { visitor.visit_macro_def(macro_def); @@ -340,7 +341,7 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> { let source_map = self.tcx.sess.source_map(); let all_source_files = source_map.files(); - let (working_dir, working_dir_was_remapped) = self.tcx.sess.working_dir.clone(); + let (working_dir, _cwd_remapped) = self.tcx.sess.working_dir.clone(); let adapted = all_source_files.iter() .filter(|source_file| { @@ -349,32 +350,26 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> { !source_file.is_imported() }) .map(|source_file| { - // When exporting SourceFiles, we expand all paths to absolute - // paths because any relative paths are potentially relative to - // a wrong directory. - // However, if a path has been modified via - // `--remap-path-prefix` we assume the user has already set - // things up the way they want and don't touch the path values - // anymore. match source_file.name { + // This path of this SourceFile has been modified by + // path-remapping, so we use it verbatim (and avoid + // cloning the whole map in the process). + _ if source_file.name_was_remapped => source_file.clone(), + + // Otherwise expand all paths to absolute paths because + // any relative paths are potentially relative to a + // wrong directory. 
FileName::Real(ref name) => { - if source_file.name_was_remapped || - (name.is_relative() && working_dir_was_remapped) { - // This path of this SourceFile has been modified by - // path-remapping, so we use it verbatim (and avoid cloning - // the whole map in the process). - source_file.clone() - } else { - let mut adapted = (**source_file).clone(); - adapted.name = Path::new(&working_dir).join(name).into(); - adapted.name_hash = { - let mut hasher: StableHasher = StableHasher::new(); - adapted.name.hash(&mut hasher); - hasher.finish() - }; - Lrc::new(adapted) - } + let mut adapted = (**source_file).clone(); + adapted.name = Path::new(&working_dir).join(name).into(); + adapted.name_hash = { + let mut hasher: StableHasher = StableHasher::new(); + adapted.name.hash(&mut hasher); + hasher.finish() + }; + Lrc::new(adapted) }, + // expanded code, not from a file _ => source_file.clone(), } @@ -501,8 +496,8 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> { .plugin_registrar_fn .get() .map(|id| tcx.hir.local_def_id(id).index), - macro_derive_registrar: if is_proc_macro { - let id = tcx.sess.derive_registrar_fn.get().unwrap(); + proc_macro_decls_static: if is_proc_macro { + let id = tcx.sess.proc_macro_decls_static.get().unwrap(); Some(tcx.hir.local_def_id(id).index) } else { None @@ -586,7 +581,7 @@ impl<'a, 'b: 'a, 'tcx: 'b> IsolatedEncoder<'a, 'b, 'tcx> { /// the right to access any information in the adt-def (including, /// e.g., the length of the various vectors). fn encode_enum_variant_info(&mut self, - (enum_did, Untracked(index)): (DefId, Untracked)) + (enum_did, Untracked(index)): (DefId, Untracked)) -> Entry<'tcx> { let tcx = self.tcx; let def = tcx.adt_def(enum_did); @@ -681,7 +676,7 @@ impl<'a, 'b: 'a, 'tcx: 'b> IsolatedEncoder<'a, 'b, 'tcx> { /// vectors). 
fn encode_field(&mut self, (adt_def_id, Untracked((variant_index, field_index))): (DefId, - Untracked<(usize, + Untracked<(VariantIdx, usize)>)) -> Entry<'tcx> { let tcx = self.tcx; @@ -1673,7 +1668,7 @@ impl<'a, 'b, 'tcx> Visitor<'tcx> for EncodeVisitor<'a, 'b, 'tcx> { impl<'a, 'b, 'tcx> IndexBuilder<'a, 'b, 'tcx> { fn encode_fields(&mut self, adt_def_id: DefId) { let def = self.tcx.adt_def(adt_def_id); - for (variant_index, variant) in def.variants.iter().enumerate() { + for (variant_index, variant) in def.variants.iter_enumerated() { for (field_index, field) in variant.fields.iter().enumerate() { self.record(field.did, IsolatedEncoder::encode_field, @@ -1740,7 +1735,7 @@ impl<'a, 'b, 'tcx> IndexBuilder<'a, 'b, 'tcx> { self.encode_fields(def_id); let def = self.tcx.adt_def(def_id); - for (i, variant) in def.variants.iter().enumerate() { + for (i, variant) in def.variants.iter_enumerated() { self.record(variant.did, IsolatedEncoder::encode_enum_variant_info, (def_id, Untracked(i))); diff --git a/src/librustc_metadata/lib.rs b/src/librustc_metadata/lib.rs index 7008166b90..ee99f7465b 100644 --- a/src/librustc_metadata/lib.rs +++ b/src/librustc_metadata/lib.rs @@ -14,7 +14,6 @@ #![feature(box_patterns)] #![feature(libc)] -#![feature(macro_at_most_once_rep)] #![feature(nll)] #![feature(proc_macro_internals)] #![feature(proc_macro_quote)] @@ -30,6 +29,8 @@ extern crate libc; #[macro_use] extern crate log; +extern crate memmap; +extern crate stable_deref_trait; #[macro_use] extern crate syntax; extern crate syntax_pos; @@ -38,7 +39,6 @@ extern crate serialize as rustc_serialize; // used by deriving extern crate rustc_errors as errors; extern crate syntax_ext; extern crate proc_macro; -extern crate rustc_metadata_utils; #[macro_use] extern crate rustc; @@ -64,4 +64,34 @@ pub mod cstore; pub mod dynamic_lib; pub mod locator; +pub fn validate_crate_name( + sess: Option<&rustc::session::Session>, + s: &str, + sp: Option +) { + let mut err_count = 0; + { + let mut say = |s: &str| { + match (sp, sess) { + (_, None) => bug!("{}", s), + (Some(sp), Some(sess)) => sess.span_err(sp, s), + (None, Some(sess)) => sess.err(s), + } + err_count += 1; + }; + if s.is_empty() { + say("crate name must not be empty"); + } + for c in s.chars() { + if c.is_alphanumeric() { continue } + if c == '_' { continue } + say(&format!("invalid character `{}` in crate name: `{}`", c, s)); + } + } + + if err_count > 0 { + sess.unwrap().abort_if_errors(); + } +} + __build_diagnostic_array! { librustc_metadata, DIAGNOSTICS } diff --git a/src/librustc_metadata/locator.rs b/src/librustc_metadata/locator.rs index a732446d50..d4e51693d7 100644 --- a/src/librustc_metadata/locator.rs +++ b/src/librustc_metadata/locator.rs @@ -243,12 +243,14 @@ use std::cmp; use std::fmt; use std::fs; use std::io::{self, Read}; +use std::ops::Deref; use std::path::{Path, PathBuf}; use std::time::Instant; use flate2::read::DeflateDecoder; use rustc_data_structures::owning_ref::OwningRef; + pub struct CrateMismatch { path: PathBuf, got: String, @@ -711,7 +713,7 @@ impl<'a> Context<'a> { let root = metadata.get_root(); if let Some(is_proc_macro) = self.is_proc_macro { - if root.macro_derive_registrar.is_some() != is_proc_macro { + if root.proc_macro_decls_static.is_some() != is_proc_macro { return None; } } @@ -856,6 +858,19 @@ fn get_metadata_section(target: &Target, return ret; } +/// A trivial wrapper for `Mmap` that implements `StableDeref`. 
+struct StableDerefMmap(memmap::Mmap); + +impl Deref for StableDerefMmap { + type Target = [u8]; + + fn deref(&self) -> &[u8] { + self.0.deref() + } +} + +unsafe impl stable_deref_trait::StableDeref for StableDerefMmap {} + fn get_metadata_section_imp(target: &Target, flavor: CrateFlavor, filename: &Path, @@ -892,9 +907,14 @@ fn get_metadata_section_imp(target: &Target, } } CrateFlavor::Rmeta => { - let buf = fs::read(filename).map_err(|_| - format!("failed to read rmeta metadata: '{}'", filename.display()))?; - rustc_erase_owner!(OwningRef::new(buf).map_owner_box()) + // mmap the file, because only a small fraction of it is read. + let file = std::fs::File::open(filename).map_err(|_| + format!("failed to open rmeta metadata: '{}'", filename.display()))?; + let mmap = unsafe { memmap::Mmap::map(&file) }; + let mmap = mmap.map_err(|_| + format!("failed to mmap rmeta metadata: '{}'", filename.display()))?; + + rustc_erase_owner!(OwningRef::new(StableDerefMmap(mmap)).map_owner_box()) } }; let blob = MetadataBlob(raw_bytes); diff --git a/src/librustc_metadata/schema.rs b/src/librustc_metadata/schema.rs index da2a8ae714..e91d15b78c 100644 --- a/src/librustc_metadata/schema.rs +++ b/src/librustc_metadata/schema.rs @@ -196,7 +196,7 @@ pub struct CrateRoot { pub has_panic_handler: bool, pub has_default_lib_allocator: bool, pub plugin_registrar_fn: Option, - pub macro_derive_registrar: Option, + pub proc_macro_decls_static: Option, pub crate_deps: LazySeq, pub dylib_dependency_formats: LazySeq>, diff --git a/src/librustc_metadata_utils/Cargo.toml b/src/librustc_metadata_utils/Cargo.toml deleted file mode 100644 index 4a5e20376b..0000000000 --- a/src/librustc_metadata_utils/Cargo.toml +++ /dev/null @@ -1,14 +0,0 @@ -[package] -authors = ["The Rust Project Developers"] -name = "rustc_metadata_utils" -version = "0.0.0" - -[lib] -name = "rustc_metadata_utils" -path = "lib.rs" -crate-type = ["dylib"] - -[dependencies] -rustc = { path = "../librustc" } -syntax = { path = "../libsyntax" } -syntax_pos = { path = "../libsyntax_pos" } diff --git a/src/librustc_metadata_utils/lib.rs b/src/librustc_metadata_utils/lib.rs deleted file mode 100644 index a1e5150390..0000000000 --- a/src/librustc_metadata_utils/lib.rs +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
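`StableDerefMmap` above exists only to assert that the memory-mapped bytes keep a stable address, so the buffer can be handed to `OwningRef`. The following is a self-contained sketch of the same newtype pattern, with a local marker trait standing in for `stable_deref_trait::StableDeref` and a heap buffer standing in for `memmap::Mmap`:

```
// Editor's sketch of the newtype pattern above, with a local marker trait in
// place of `stable_deref_trait::StableDeref` and a heap buffer in place of
// `memmap::Mmap`.
use std::ops::Deref;

/// Stand-in marker: "the deref target keeps a stable address when the
/// wrapper itself is moved".
unsafe trait StableDeref: Deref {}

/// Trivial wrapper; the real code wraps the memory-mapped file instead.
struct StableDerefBuf(Vec<u8>);

impl Deref for StableDerefBuf {
    type Target = [u8];

    fn deref(&self) -> &[u8] {
        &self.0
    }
}

// The bytes live on the heap, so moving the wrapper does not move them.
unsafe impl StableDeref for StableDerefBuf {}

// A consumer (like `OwningRef`) can demand the marker in its bounds.
fn first_byte<B: StableDeref<Target = [u8]>>(buf: &B) -> Option<u8> {
    buf.first().copied()
}

fn main() {
    let metadata = StableDerefBuf(vec![0x72, 0x75, 0x73, 0x74]);
    assert_eq!(first_byte(&metadata), Some(0x72));
}
```

The `unsafe impl` is the whole point of the wrapper: it is the author's promise that the deref target never moves, which an mmap'd region satisfies.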
- -#[macro_use] -extern crate rustc; -extern crate syntax_pos; - -use rustc::session::Session; -use syntax_pos::Span; - -pub fn validate_crate_name(sess: Option<&Session>, s: &str, sp: Option) { - let mut err_count = 0; - { - let mut say = |s: &str| { - match (sp, sess) { - (_, None) => bug!("{}", s), - (Some(sp), Some(sess)) => sess.span_err(sp, s), - (None, Some(sess)) => sess.err(s), - } - err_count += 1; - }; - if s.is_empty() { - say("crate name must not be empty"); - } - for c in s.chars() { - if c.is_alphanumeric() { continue } - if c == '_' { continue } - say(&format!("invalid character `{}` in crate name: `{}`", c, s)); - } - } - - if err_count > 0 { - sess.unwrap().abort_if_errors(); - } -} diff --git a/src/librustc_mir/borrow_check/borrow_set.rs b/src/librustc_mir/borrow_check/borrow_set.rs index db56ce4627..fd7dc7fc4b 100644 --- a/src/librustc_mir/borrow_check/borrow_set.rs +++ b/src/librustc_mir/borrow_check/borrow_set.rs @@ -9,6 +9,7 @@ // except according to those terms. use borrow_check::place_ext::PlaceExt; +use borrow_check::nll::ToRegionVid; use dataflow::indexes::BorrowIndex; use dataflow::move_paths::MoveData; use rustc::mir::traversal; @@ -16,12 +17,11 @@ use rustc::mir::visit::{ PlaceContext, Visitor, NonUseContext, MutatingUseContext, NonMutatingUseContext }; use rustc::mir::{self, Location, Mir, Place, Local}; -use rustc::ty::{Region, TyCtxt}; +use rustc::ty::{RegionVid, TyCtxt}; use rustc::util::nodemap::{FxHashMap, FxHashSet}; use rustc_data_structures::indexed_vec::IndexVec; use rustc_data_structures::bit_set::BitSet; use std::fmt; -use std::hash::Hash; use std::ops::Index; crate struct BorrowSet<'tcx> { @@ -43,7 +43,7 @@ crate struct BorrowSet<'tcx> { /// Every borrow has a region; this maps each such regions back to /// its borrow-indexes. - crate region_map: FxHashMap, FxHashSet>, + crate region_map: FxHashMap>, /// Map from local to all the borrows on that local crate local_map: FxHashMap>, @@ -78,7 +78,7 @@ crate struct BorrowData<'tcx> { /// What kind of borrow this is crate kind: mir::BorrowKind, /// The region for which this borrow is live - crate region: Region<'tcx>, + crate region: RegionVid, /// Place from which we are borrowing crate borrowed_place: mir::Place<'tcx>, /// Place to which the borrow was stored @@ -93,13 +93,7 @@ impl<'tcx> fmt::Display for BorrowData<'tcx> { mir::BorrowKind::Unique => "uniq ", mir::BorrowKind::Mut { .. 
} => "mut ", }; - let region = self.region.to_string(); - let separator = if !region.is_empty() { - " " - } else { - "" - }; - write!(w, "&{}{}{}{:?}", region, separator, kind, self.borrowed_place) + write!(w, "&{:?} {}{:?}", self.region, kind, self.borrowed_place) } } @@ -190,7 +184,7 @@ struct GatherBorrows<'a, 'gcx: 'tcx, 'tcx: 'a> { idx_vec: IndexVec>, location_map: FxHashMap, activation_map: FxHashMap>, - region_map: FxHashMap, FxHashSet>, + region_map: FxHashMap>, local_map: FxHashMap>, /// When we encounter a 2-phase borrow statement, it will always @@ -220,6 +214,8 @@ impl<'a, 'gcx, 'tcx> Visitor<'tcx> for GatherBorrows<'a, 'gcx, 'tcx> { return; } + let region = region.to_region_vid(); + let borrow = BorrowData { kind, region, @@ -231,23 +227,15 @@ impl<'a, 'gcx, 'tcx> Visitor<'tcx> for GatherBorrows<'a, 'gcx, 'tcx> { let idx = self.idx_vec.push(borrow); self.location_map.insert(location, idx); - self.insert_as_pending_if_two_phase(location, &assigned_place, region, kind, idx); + self.insert_as_pending_if_two_phase(location, &assigned_place, kind, idx); - insert(&mut self.region_map, ®ion, idx); + self.region_map.entry(region).or_default().insert(idx); if let Some(local) = borrowed_place.root_local() { - insert(&mut self.local_map, &local, idx); + self.local_map.entry(local).or_default().insert(idx); } } - return self.super_assign(block, assigned_place, rvalue, location); - - fn insert<'a, K, V>(map: &'a mut FxHashMap>, k: &K, v: V) - where - K: Clone + Eq + Hash, - V: Eq + Hash, - { - map.entry(k.clone()).or_default().insert(v); - } + self.super_assign(block, assigned_place, rvalue, location) } fn visit_place( @@ -323,7 +311,7 @@ impl<'a, 'gcx, 'tcx> Visitor<'tcx> for GatherBorrows<'a, 'gcx, 'tcx> { let borrow_data = &self.idx_vec[borrow_index]; assert_eq!(borrow_data.reserve_location, location); assert_eq!(borrow_data.kind, kind); - assert_eq!(borrow_data.region, region); + assert_eq!(borrow_data.region, region.to_region_vid()); assert_eq!(borrow_data.borrowed_place, *place); } @@ -356,13 +344,12 @@ impl<'a, 'gcx, 'tcx> GatherBorrows<'a, 'gcx, 'tcx> { &mut self, start_location: Location, assigned_place: &mir::Place<'tcx>, - region: Region<'tcx>, kind: mir::BorrowKind, borrow_index: BorrowIndex, ) { debug!( - "Borrows::insert_as_pending_if_two_phase({:?}, {:?}, {:?}, {:?})", - start_location, assigned_place, region, borrow_index, + "Borrows::insert_as_pending_if_two_phase({:?}, {:?}, {:?})", + start_location, assigned_place, borrow_index, ); if !self.allow_two_phase_borrow(kind) { diff --git a/src/librustc_mir/borrow_check/error_reporting.rs b/src/librustc_mir/borrow_check/error_reporting.rs index b2b92a6f85..4ccd26bee8 100644 --- a/src/librustc_mir/borrow_check/error_reporting.rs +++ b/src/librustc_mir/borrow_check/error_reporting.rs @@ -788,7 +788,7 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { let what_was_dropped = match self.describe_place(place) { Some(name) => format!("`{}`", name.as_str()), - None => format!("temporary value"), + None => String::from("temporary value"), }; let label = match self.describe_place(&borrow.borrowed_place) { @@ -1028,7 +1028,7 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { match category { ConstraintCategory::Return => { - err.span_note(constraint_span, &format!("closure is returned here")); + err.span_note(constraint_span, "closure is returned here"); } ConstraintCategory::CallArgument => { fr_name.highlight_region_name(&mut err); @@ -2193,7 +2193,7 @@ impl<'tcx> AnnotatedBorrowFnSignature<'tcx> { match ty.sty { 
ty::TyKind::Ref(ty::RegionKind::ReLateBound(_, br), _, _) | ty::TyKind::Ref( - ty::RegionKind::RePlaceholder(ty::Placeholder { name: br, .. }), + ty::RegionKind::RePlaceholder(ty::PlaceholderRegion { name: br, .. }), _, _, ) => with_highlight_region_for_bound_region(*br, counter, || ty.to_string()), @@ -2207,7 +2207,7 @@ impl<'tcx> AnnotatedBorrowFnSignature<'tcx> { match ty.sty { ty::TyKind::Ref(region, _, _) => match region { ty::RegionKind::ReLateBound(_, br) - | ty::RegionKind::RePlaceholder(ty::Placeholder { name: br, .. }) => { + | ty::RegionKind::RePlaceholder(ty::PlaceholderRegion { name: br, .. }) => { with_highlight_region_for_bound_region(*br, counter, || region.to_string()) } _ => region.to_string(), diff --git a/src/librustc_mir/borrow_check/location.rs b/src/librustc_mir/borrow_check/location.rs index 91008e8f96..b3e159dd84 100644 --- a/src/librustc_mir/borrow_check/location.rs +++ b/src/librustc_mir/borrow_check/location.rs @@ -11,7 +11,7 @@ use rustc::mir::{BasicBlock, Location, Mir}; use rustc_data_structures::indexed_vec::{Idx, IndexVec}; -/// Maps between a MIR Location, which identifies the a particular +/// Maps between a MIR Location, which identifies a particular /// statement within a basic block, to a "rich location", which /// identifies at a finer granularity. In particular, we distinguish /// the *start* of a statement and the *mid-point*. The mid-point is diff --git a/src/librustc_mir/borrow_check/mod.rs b/src/librustc_mir/borrow_check/mod.rs index 920040d145..76ba6ae5de 100644 --- a/src/librustc_mir/borrow_check/mod.rs +++ b/src/librustc_mir/borrow_check/mod.rs @@ -14,7 +14,6 @@ use borrow_check::nll::region_infer::RegionInferenceContext; use rustc::hir; use rustc::hir::Node; use rustc::hir::def_id::DefId; -use rustc::hir::map::definitions::DefPathData; use rustc::infer::InferCtxt; use rustc::lint::builtin::UNUSED_MUT; use rustc::middle::borrowck::SignalledError; @@ -162,10 +161,6 @@ fn do_mir_borrowck<'a, 'gcx, 'tcx>( move_data: move_data, param_env: param_env, }; - let body_id = match tcx.def_key(def_id).disambiguated_data.data { - DefPathData::StructCtor | DefPathData::EnumVariant(_) => None, - _ => Some(tcx.hir.body_owned_by(id)), - }; let dead_unwinds = BitSet::new_empty(mir.basic_blocks().len()); let mut flow_inits = FlowAtLocation::new(do_dataflow( @@ -212,7 +207,7 @@ fn do_mir_borrowck<'a, 'gcx, 'tcx>( id, &attributes, &dead_unwinds, - Borrows::new(tcx, mir, regioncx.clone(), def_id, body_id, &borrow_set), + Borrows::new(tcx, mir, regioncx.clone(), &borrow_set), |rs, i| DebugFormatted::new(&rs.location(i)), )); let flow_uninits = FlowAtLocation::new(do_dataflow( @@ -281,23 +276,21 @@ fn do_mir_borrowck<'a, 'gcx, 'tcx>( // Note that this set is expected to be small - only upvars from closures // would have a chance of erroneously adding non-user-defined mutable vars // to the set. - let temporary_used_locals: FxHashSet = mbcx - .used_mut - .iter() + let temporary_used_locals: FxHashSet = mbcx.used_mut.iter() .filter(|&local| mbcx.mir.local_decls[*local].is_user_variable.is_none()) .cloned() .collect(); - mbcx.gather_used_muts(temporary_used_locals); + // For the remaining unused locals that are marked as mutable, we avoid linting any that + // were never initialized. These locals may have been removed as unreachable code; or will be + // linted as unused variables. 
+ let unused_mut_locals = mbcx.mir.mut_vars_iter() + .filter(|local| !mbcx.used_mut.contains(local)) + .collect(); + mbcx.gather_used_muts(temporary_used_locals, unused_mut_locals); debug!("mbcx.used_mut: {:?}", mbcx.used_mut); - let used_mut = mbcx.used_mut; - - for local in mbcx - .mir - .mut_vars_and_args_iter() - .filter(|local| !used_mut.contains(local)) - { + for local in mbcx.mir.mut_vars_and_args_iter().filter(|local| !used_mut.contains(local)) { if let ClearCrossCrate::Set(ref vsi) = mbcx.mir.source_scope_local_data { let local_decl = &mbcx.mir.local_decls[local]; @@ -590,19 +583,16 @@ impl<'cx, 'gcx, 'tcx> DataflowResultsConsumer<'cx, 'tcx> for MirBorrowckCtxt<'cx ); } } - for input in inputs.iter() { + for (_, input) in inputs.iter() { self.consume_operand(context, (input, span), flow_state); } } - StatementKind::EndRegion(ref _rgn) => { - // ignored when consuming results (update to - // flow_state already handled). - } StatementKind::Nop | StatementKind::AscribeUserType(..) - | StatementKind::Validate(..) + | StatementKind::Retag { .. } + | StatementKind::EscapeToRaw { .. } | StatementKind::StorageLive(..) => { - // `Nop`, `AscribeUserType`, `Validate`, and `StorageLive` are irrelevant + // `Nop`, `AscribeUserType`, `Retag`, and `StorageLive` are irrelevant // to borrow check. } StatementKind::StorageDead(local) => { @@ -1719,12 +1709,13 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { } } - fn check_parent_of_field<'cx, 'gcx, 'tcx>(this: &mut MirBorrowckCtxt<'cx, 'gcx, 'tcx>, - context: Context, - base: &Place<'tcx>, - span: Span, - flow_state: &Flows<'cx, 'gcx, 'tcx>) - { + fn check_parent_of_field<'cx, 'gcx, 'tcx>( + this: &mut MirBorrowckCtxt<'cx, 'gcx, 'tcx>, + context: Context, + base: &Place<'tcx>, + span: Span, + flow_state: &Flows<'cx, 'gcx, 'tcx>, + ) { // rust-lang/rust#21232: Until Rust allows reads from the // initialized parts of partially initialized structs, we // will, starting with the 2018 edition, reject attempts @@ -1776,6 +1767,24 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { } if let Some((prefix, mpi)) = shortest_uninit_seen { + // Check for a reassignment into a uninitialized field of a union (for example, + // after a move out). In this case, do not report a error here. There is an + // exception, if this is the first assignment into the union (that is, there is + // no move out from an earlier location) then this is an attempt at initialization + // of the union - we should error in that case. 
+ let tcx = this.infcx.tcx; + if let ty::TyKind::Adt(def, _) = base.ty(this.mir, tcx).to_ty(tcx).sty { + if def.is_union() { + if this.move_data.path_map[mpi].iter().any(|moi| { + this.move_data.moves[*moi].source.is_predecessor_of( + context.loc, this.mir, + ) + }) { + return; + } + } + } + this.report_use_of_moved_or_uninitialized( context, InitializationRequiringAction::PartialAssignment, @@ -1857,7 +1866,10 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { | Write(wk @ WriteKind::StorageDeadOrDrop) | Write(wk @ WriteKind::MutableBorrow(BorrowKind::Shared)) | Write(wk @ WriteKind::MutableBorrow(BorrowKind::Shallow)) => { - if let Err(_place_err) = self.is_mutable(place, is_local_mutation_allowed) { + if let (Err(_place_err), true) = ( + self.is_mutable(place, is_local_mutation_allowed), + self.errors_buffer.is_empty() + ) { if self.infcx.tcx.migrate_borrowck() { // rust-lang/rust#46908: In pure NLL mode this // code path should be unreachable (and thus @@ -1881,12 +1893,11 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { location, ); } else { - self.infcx.tcx.sess.delay_span_bug( + span_bug!( span, - &format!( - "Accessing `{:?}` with the kind `{:?}` shouldn't be possible", - place, kind - ), + "Accessing `{:?}` with the kind `{:?}` shouldn't be possible", + place, + kind, ); } } diff --git a/src/librustc_mir/borrow_check/mutability_errors.rs b/src/librustc_mir/borrow_check/mutability_errors.rs index b71b131570..7afe2c67ad 100644 --- a/src/librustc_mir/borrow_check/mutability_errors.rs +++ b/src/librustc_mir/borrow_check/mutability_errors.rs @@ -180,9 +180,9 @@ impl<'a, 'gcx, 'tcx> MirBorrowckCtxt<'a, 'gcx, 'tcx> { AccessKind::Move => { err = self.infcx.tcx .cannot_move_out_of(span, &(item_msg + &reason), Origin::Mir); - act = "move"; - acted_on = "moved"; - span + err.span_label(span, "cannot move"); + err.buffer(&mut self.errors_buffer); + return; } AccessKind::Mutate => { err = self.infcx.tcx diff --git a/src/librustc_mir/borrow_check/nll/explain_borrow/mod.rs b/src/librustc_mir/borrow_check/nll/explain_borrow/mod.rs index 2bf531d1d3..bb9a29b055 100644 --- a/src/librustc_mir/borrow_check/nll/explain_borrow/mod.rs +++ b/src/librustc_mir/borrow_check/nll/explain_borrow/mod.rs @@ -206,7 +206,7 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { let mir = self.mir; let tcx = self.infcx.tcx; - let borrow_region_vid = regioncx.to_region_vid(borrow.region); + let borrow_region_vid = borrow.region; debug!( "explain_why_borrow_contains_point: borrow_region_vid={:?}", borrow_region_vid diff --git a/src/librustc_mir/borrow_check/nll/invalidation.rs b/src/librustc_mir/borrow_check/nll/invalidation.rs index 002f35880a..8af23a8813 100644 --- a/src/librustc_mir/borrow_check/nll/invalidation.rs +++ b/src/librustc_mir/borrow_check/nll/invalidation.rs @@ -128,17 +128,16 @@ impl<'cx, 'tcx, 'gcx> Visitor<'tcx> for InvalidationGenerator<'cx, 'tcx, 'gcx> { ); } } - for input in inputs.iter() { + for (_, input) in inputs.iter() { self.consume_operand(context, input); } } - // EndRegion matters to older NLL/MIR AST borrowck, not to alias NLL - StatementKind::EndRegion(..) | StatementKind::Nop | StatementKind::AscribeUserType(..) | - StatementKind::Validate(..) | + StatementKind::Retag { .. } | + StatementKind::EscapeToRaw { .. } | StatementKind::StorageLive(..) => { - // `Nop`, `AscribeUserType`, `Validate`, and `StorageLive` are irrelevant + // `Nop`, `AscribeUserType`, `Retag`, and `StorageLive` are irrelevant // to borrow check. 
} StatementKind::StorageDead(local) => { diff --git a/src/librustc_mir/borrow_check/nll/mod.rs b/src/librustc_mir/borrow_check/nll/mod.rs index 8fc54b6ff9..0c4140caee 100644 --- a/src/librustc_mir/borrow_check/nll/mod.rs +++ b/src/librustc_mir/borrow_check/nll/mod.rs @@ -107,7 +107,6 @@ pub(in borrow_check) fn compute_regions<'cx, 'gcx, 'tcx>( // Run the MIR type-checker. let MirTypeckResults { constraints, - placeholder_indices, universal_region_relations, } = type_check::type_check( infcx, @@ -123,8 +122,6 @@ pub(in borrow_check) fn compute_regions<'cx, 'gcx, 'tcx>( elements, ); - let placeholder_indices = Rc::new(placeholder_indices); - if let Some(all_facts) = &mut all_facts { all_facts .universal_region @@ -136,11 +133,14 @@ pub(in borrow_check) fn compute_regions<'cx, 'gcx, 'tcx>( // base constraints generated by the type-check. let var_origins = infcx.take_region_var_origins(); let MirTypeckRegionConstraints { + placeholder_indices, + placeholder_index_to_region: _, mut liveness_constraints, outlives_constraints, closure_bounds_mapping, type_tests, } = constraints; + let placeholder_indices = Rc::new(placeholder_indices); constraint_generation::generate_constraints( infcx, diff --git a/src/librustc_mir/borrow_check/nll/region_infer/error_reporting/mod.rs b/src/librustc_mir/borrow_check/nll/region_infer/error_reporting/mod.rs index ccb44c670f..3358e5851f 100644 --- a/src/librustc_mir/borrow_check/nll/region_infer/error_reporting/mod.rs +++ b/src/librustc_mir/borrow_check/nll/region_infer/error_reporting/mod.rs @@ -8,23 +8,24 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use borrow_check::nll::ConstraintDescription; -use borrow_check::nll::constraints::{OutlivesConstraint}; +use borrow_check::nll::constraints::OutlivesConstraint; use borrow_check::nll::region_infer::RegionInferenceContext; use borrow_check::nll::type_check::Locations; use borrow_check::nll::universal_regions::DefiningTy; -use util::borrowck_errors::{BorrowckErrors, Origin}; +use borrow_check::nll::ConstraintDescription; use rustc::hir::def_id::DefId; use rustc::infer::error_reporting::nice_region_error::NiceRegionError; use rustc::infer::InferCtxt; +use rustc::infer::NLLRegionVariableOrigin; use rustc::mir::{ConstraintCategory, Location, Mir}; use rustc::ty::{self, RegionVid}; use rustc_data_structures::indexed_vec::IndexVec; use rustc_errors::{Diagnostic, DiagnosticBuilder}; use std::collections::VecDeque; +use syntax::errors::Applicability; use syntax::symbol::keywords; use syntax_pos::Span; -use syntax::errors::Applicability; +use util::borrowck_errors::{BorrowckErrors, Origin}; mod region_name; mod var_name; @@ -76,9 +77,9 @@ impl<'tcx> RegionInferenceContext<'tcx> { debug!("best_blame_constraint(from_region={:?})", from_region); // Find all paths - let (path, target_region) = self - .find_constraint_paths_between_regions(from_region, target_test) - .unwrap(); + let (path, target_region) = + self.find_constraint_paths_between_regions(from_region, target_test) + .unwrap(); debug!( "best_blame_constraint: path={:#?}", path.iter() @@ -92,8 +93,7 @@ impl<'tcx> RegionInferenceContext<'tcx> { ); // Classify each of the constraints along the path. 
- let mut categorized_path: Vec<(ConstraintCategory, bool, Span)> = path - .iter() + let mut categorized_path: Vec<(ConstraintCategory, bool, Span)> = path.iter() .map(|constraint| { if constraint.category == ConstraintCategory::ClosureBounds { self.retrieve_closure_constraint_info(mir, &constraint) @@ -137,13 +137,12 @@ impl<'tcx> RegionInferenceContext<'tcx> { | ConstraintCategory::Boring | ConstraintCategory::BoringNoLocation | ConstraintCategory::Internal => false, - ConstraintCategory::TypeAnnotation - | ConstraintCategory::Return => true, + ConstraintCategory::TypeAnnotation | ConstraintCategory::Return => true, _ => constraint_sup_scc != target_scc, } }); if let Some(i) = best_choice { - return categorized_path[i] + return categorized_path[i]; } // If that search fails, that is.. unusual. Maybe everything @@ -179,6 +178,13 @@ impl<'tcx> RegionInferenceContext<'tcx> { deque.push_back(from_region); while let Some(r) = deque.pop_front() { + debug!( + "find_constraint_paths_between_regions: from_region={:?} r={:?} value={}", + from_region, + r, + self.region_value_str(r), + ); + // Check if we reached the region we were looking for. If so, // we can reconstruct the path that led to it and return it. if target_test(r) { @@ -206,9 +212,9 @@ impl<'tcx> RegionInferenceContext<'tcx> { // enqueue any regions we find, keeping track of how we // reached them. let fr_static = self.universal_regions.fr_static; - for constraint in self.constraint_graph.outgoing_edges(r, - &self.constraints, - fr_static) { + for constraint in self.constraint_graph + .outgoing_edges(r, &self.constraints, fr_static) + { assert_eq!(constraint.sup, r); let sub_region = constraint.sub; if let Trace::NotVisited = context[sub_region] { @@ -240,11 +246,9 @@ impl<'tcx> RegionInferenceContext<'tcx> { ) { debug!("report_error(fr={:?}, outlived_fr={:?})", fr, outlived_fr); - let (category, _, span) = self.best_blame_constraint( - mir, - fr, - |r| r == outlived_fr - ); + let (category, _, span) = self.best_blame_constraint(mir, fr, |r| { + self.provides_universal_region(r, fr, outlived_fr) + }); // Check if we can use one of the "nice region errors". 
if let (Some(f), Some(o)) = (self.to_error_region(fr), self.to_error_region(outlived_fr)) { @@ -260,23 +264,75 @@ impl<'tcx> RegionInferenceContext<'tcx> { self.universal_regions.is_local_free_region(outlived_fr), ); - debug!("report_error: fr_is_local={:?} outlived_fr_is_local={:?} category={:?}", - fr_is_local, outlived_fr_is_local, category); + debug!( + "report_error: fr_is_local={:?} outlived_fr_is_local={:?} category={:?}", + fr_is_local, outlived_fr_is_local, category + ); match (category, fr_is_local, outlived_fr_is_local) { - (ConstraintCategory::Return, true, false) if self.is_closure_fn_mut(infcx, fr) => - self.report_fnmut_error(mir, infcx, mir_def_id, fr, outlived_fr, span, - errors_buffer), - (ConstraintCategory::Assignment, true, false) | - (ConstraintCategory::CallArgument, true, false) => - self.report_escaping_data_error(mir, infcx, mir_def_id, fr, outlived_fr, - category, span, errors_buffer), - _ => - self.report_general_error(mir, infcx, mir_def_id, fr, fr_is_local, - outlived_fr, outlived_fr_is_local, - category, span, errors_buffer), + (ConstraintCategory::Return, true, false) if self.is_closure_fn_mut(infcx, fr) => { + self.report_fnmut_error( + mir, + infcx, + mir_def_id, + fr, + outlived_fr, + span, + errors_buffer, + ) + } + (ConstraintCategory::Assignment, true, false) + | (ConstraintCategory::CallArgument, true, false) => self.report_escaping_data_error( + mir, + infcx, + mir_def_id, + fr, + outlived_fr, + category, + span, + errors_buffer, + ), + _ => self.report_general_error( + mir, + infcx, + mir_def_id, + fr, + fr_is_local, + outlived_fr, + outlived_fr_is_local, + category, + span, + errors_buffer, + ), }; } + /// We have a constraint `fr1: fr2` that is not satisfied, where + /// `fr2` represents some universal region. Here, `r` is some + /// region where we know that `fr1: r` and this function has the + /// job of determining whether `r` is "to blame" for the fact that + /// `fr1: fr2` is required. + /// + /// This is true under two conditions: + /// + /// - `r == fr2` + /// - `fr2` is `'static` and `r` is some placeholder in a universe + /// that cannot be named by `fr1`; in that case, we will require + /// that `fr1: 'static` because it is the only way to `fr1: r` to + /// be satisfied. (See `add_incompatible_universe`.) + fn provides_universal_region(&self, r: RegionVid, fr1: RegionVid, fr2: RegionVid) -> bool { + debug!( + "provides_universal_region(r={:?}, fr1={:?}, fr2={:?})", + r, fr1, fr2 + ); + let result = { + r == fr2 || { + fr2 == self.universal_regions.fr_static && self.cannot_name_placeholder(fr1, r) + } + }; + debug!("provides_universal_region: result = {:?}", result); + result + } + /// Report a specialized error when `FnMut` closures return a reference to a captured variable. /// This function expects `fr` to be local and `outlived_fr` to not be local. /// @@ -303,10 +359,10 @@ impl<'tcx> RegionInferenceContext<'tcx> { span: Span, errors_buffer: &mut Vec, ) { - let mut diag = infcx.tcx.sess.struct_span_err( - span, - "captured variable cannot escape `FnMut` closure body", - ); + let mut diag = infcx + .tcx + .sess + .struct_span_err(span, "captured variable cannot escape `FnMut` closure body"); // We should check if the return type of this closure is in fact a closure - in that // case, we can special case the error further. 
@@ -318,27 +374,28 @@ impl<'tcx> RegionInferenceContext<'tcx> { "returns a reference to a captured variable which escapes the closure body" }; - diag.span_label( - span, - message, - ); + diag.span_label(span, message); - match self.give_region_a_name(infcx, mir, mir_def_id, outlived_fr, &mut 1).source { - RegionNameSource::NamedEarlyBoundRegion(fr_span) | - RegionNameSource::NamedFreeRegion(fr_span) | - RegionNameSource::SynthesizedFreeEnvRegion(fr_span, _) | - RegionNameSource::CannotMatchHirTy(fr_span, _) | - RegionNameSource::MatchedHirTy(fr_span) | - RegionNameSource::MatchedAdtAndSegment(fr_span) | - RegionNameSource::AnonRegionFromUpvar(fr_span, _) | - RegionNameSource::AnonRegionFromOutput(fr_span, _, _) => { + match self.give_region_a_name(infcx, mir, mir_def_id, outlived_fr, &mut 1) + .source + { + RegionNameSource::NamedEarlyBoundRegion(fr_span) + | RegionNameSource::NamedFreeRegion(fr_span) + | RegionNameSource::SynthesizedFreeEnvRegion(fr_span, _) + | RegionNameSource::CannotMatchHirTy(fr_span, _) + | RegionNameSource::MatchedHirTy(fr_span) + | RegionNameSource::MatchedAdtAndSegment(fr_span) + | RegionNameSource::AnonRegionFromUpvar(fr_span, _) + | RegionNameSource::AnonRegionFromOutput(fr_span, _, _) => { diag.span_label(fr_span, "inferred to be a `FnMut` closure"); - }, - _ => {}, + } + _ => {} } - diag.note("`FnMut` closures only have access to their captured variables while they are \ - executing..."); + diag.note( + "`FnMut` closures only have access to their captured variables while they are \ + executing...", + ); diag.note("...therefore, they cannot allow references to captured variables to escape"); diag.buffer(errors_buffer); @@ -375,7 +432,7 @@ impl<'tcx> RegionInferenceContext<'tcx> { DefiningTy::Closure(..) => "closure", DefiningTy::Generator(..) => "generator", DefiningTy::FnDef(..) => "function", - DefiningTy::Const(..) => "const" + DefiningTy::Const(..) => "const", }; // Revert to the normal error in these cases. 
@@ -384,12 +441,23 @@ impl<'tcx> RegionInferenceContext<'tcx> { || (category == ConstraintCategory::Assignment && escapes_from == "function") || escapes_from == "const" { - return self.report_general_error(mir, infcx, mir_def_id, - fr, true, outlived_fr, false, - category, span, errors_buffer); + return self.report_general_error( + mir, + infcx, + mir_def_id, + fr, + true, + outlived_fr, + false, + category, + span, + errors_buffer, + ); } - let mut diag = infcx.tcx.borrowed_data_escapes_closure(span, escapes_from, Origin::Mir); + let mut diag = infcx + .tcx + .borrowed_data_escapes_closure(span, escapes_from, Origin::Mir); if let Some((Some(outlived_fr_name), outlived_fr_span)) = outlived_fr_name_and_span { diag.span_label( @@ -410,7 +478,10 @@ impl<'tcx> RegionInferenceContext<'tcx> { ), ); - diag.span_label(span, format!("`{}` escapes the {} body here", fr_name, escapes_from)); + diag.span_label( + span, + format!("`{}` escapes the {} body here", fr_name, escapes_from), + ); } diag.buffer(errors_buffer); @@ -452,31 +523,41 @@ impl<'tcx> RegionInferenceContext<'tcx> { let counter = &mut 1; let fr_name = self.give_region_a_name(infcx, mir, mir_def_id, fr, counter); fr_name.highlight_region_name(&mut diag); - let outlived_fr_name = self.give_region_a_name( - infcx, mir, mir_def_id, outlived_fr, counter); + let outlived_fr_name = + self.give_region_a_name(infcx, mir, mir_def_id, outlived_fr, counter); outlived_fr_name.highlight_region_name(&mut diag); - let mir_def_name = if infcx.tcx.is_closure(mir_def_id) { "closure" } else { "function" }; + let mir_def_name = if infcx.tcx.is_closure(mir_def_id) { + "closure" + } else { + "function" + }; match (category, outlived_fr_is_local, fr_is_local) { (ConstraintCategory::Return, true, _) => { - diag.span_label(span, format!( - "{} was supposed to return data with lifetime `{}` but it is returning \ - data with lifetime `{}`", - mir_def_name, outlived_fr_name, fr_name - )); - }, + diag.span_label( + span, + format!( + "{} was supposed to return data with lifetime `{}` but it is returning \ + data with lifetime `{}`", + mir_def_name, outlived_fr_name, fr_name + ), + ); + } _ => { - diag.span_label(span, format!( - "{}requires that `{}` must outlive `{}`", - category.description(), fr_name, outlived_fr_name, - )); - }, + diag.span_label( + span, + format!( + "{}requires that `{}` must outlive `{}`", + category.description(), + fr_name, + outlived_fr_name, + ), + ); + } } - self.add_static_impl_trait_suggestion( - infcx, &mut diag, fr, fr_name, outlived_fr, - ); + self.add_static_impl_trait_suggestion(infcx, &mut diag, fr, fr_name, outlived_fr); diag.buffer(errors_buffer); } @@ -499,17 +580,18 @@ impl<'tcx> RegionInferenceContext<'tcx> { fr_name: RegionName, outlived_fr: RegionVid, ) { - if let ( - Some(f), - Some(ty::RegionKind::ReStatic) - ) = (self.to_error_region(fr), self.to_error_region(outlived_fr)) { + if let (Some(f), Some(ty::RegionKind::ReStatic)) = + (self.to_error_region(fr), self.to_error_region(outlived_fr)) + { if let Some(ty::TyS { sty: ty::TyKind::Opaque(did, substs), .. - }) = infcx.tcx.is_suitable_region(f) - .map(|r| r.def_id) - .map(|id| infcx.tcx.return_type_impl_trait(id)) - .unwrap_or(None) + }) = infcx + .tcx + .is_suitable_region(f) + .map(|r| r.def_id) + .map(|id| infcx.tcx.return_type_impl_trait(id)) + .unwrap_or(None) { // Check whether or not the impl trait return type is intended to capture // data with the static lifetime. 
@@ -522,10 +604,9 @@ impl<'tcx> RegionInferenceContext<'tcx> { let mut found = false; for predicate in bounds.predicates { if let ty::Predicate::TypeOutlives(binder) = predicate { - if let ty::OutlivesPredicate( - _, - ty::RegionKind::ReStatic - ) = binder.skip_binder() { + if let ty::OutlivesPredicate(_, ty::RegionKind::ReStatic) = + binder.skip_binder() + { found = true; break; } @@ -535,18 +616,18 @@ impl<'tcx> RegionInferenceContext<'tcx> { found }; - debug!("add_static_impl_trait_suggestion: has_static_predicate={:?}", - has_static_predicate); + debug!( + "add_static_impl_trait_suggestion: has_static_predicate={:?}", + has_static_predicate + ); let static_str = keywords::StaticLifetime.name(); // If there is a static predicate, then the only sensible suggestion is to replace // fr with `'static`. if has_static_predicate { - diag.help( - &format!( - "consider replacing `{}` with `{}`", - fr_name, static_str, - ), - ); + diag.help(&format!( + "consider replacing `{}` with `{}`", + fr_name, static_str, + )); } else { // Otherwise, we should suggest adding a constraint on the return type. let span = infcx.tcx.def_span(*did); @@ -581,25 +662,48 @@ impl<'tcx> RegionInferenceContext<'tcx> { borrow_region: RegionVid, outlived_region: RegionVid, ) -> (ConstraintCategory, bool, Span, RegionName) { - let (category, from_closure, span) = self.best_blame_constraint( - mir, - borrow_region, - |r| r == outlived_region - ); - let outlived_fr_name = self.give_region_a_name( - infcx, mir, mir_def_id, outlived_region, &mut 1); + let (category, from_closure, span) = + self.best_blame_constraint(mir, borrow_region, |r| r == outlived_region); + let outlived_fr_name = + self.give_region_a_name(infcx, mir, mir_def_id, outlived_region, &mut 1); (category, from_closure, span, outlived_fr_name) } // Finds some region R such that `fr1: R` and `R` is live at // `elem`. crate fn find_sub_region_live_at(&self, fr1: RegionVid, elem: Location) -> RegionVid { - // Find all paths - let (_path, r) = - self.find_constraint_paths_between_regions(fr1, |r| { - self.liveness_constraints.contains(r, elem) - }).unwrap(); - r + debug!("find_sub_region_live_at(fr1={:?}, elem={:?})", fr1, elem); + self.find_constraint_paths_between_regions(fr1, |r| { + // First look for some `r` such that `fr1: r` and `r` is live at `elem` + debug!( + "find_sub_region_live_at: liveness_constraints for {:?} are {:?}", + r, + self.liveness_constraints.region_value_str(r), + ); + self.liveness_constraints.contains(r, elem) + }).or_else(|| { + // If we fail to find that, we may find some `r` such that + // `fr1: r` and `r` is a placeholder from some universe + // `fr1` cannot name. This would force `fr1` to be + // `'static`. + self.find_constraint_paths_between_regions(fr1, |r| { + self.cannot_name_placeholder(fr1, r) + }) + }) + .or_else(|| { + // If we fail to find THAT, it may be that `fr1` is a + // placeholder that cannot "fit" into its SCC. In that + // case, there should be some `r` where `fr1: r`, both + // `fr1` and `r` are in the same SCC, and `fr1` is a + // placeholder that `r` cannot name. We can blame that + // edge. + self.find_constraint_paths_between_regions(fr1, |r| { + self.constraint_sccs.scc(fr1) == self.constraint_sccs.scc(r) + && self.cannot_name_placeholder(r, fr1) + }) + }) + .map(|(_path, r)| r) + .unwrap() } // Finds a good span to blame for the fact that `fr1` outlives `fr2`. 
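`find_sub_region_live_at` above now tries three progressively weaker searches and takes the first that succeeds, via chained `or_else` calls. A toy version of that shape over plain slices, where the closures stand in for the liveness and placeholder tests:

```
// Editor's sketch: the "try successively weaker searches" shape of
// `find_sub_region_live_at`, over plain slices instead of the region graph.
fn find_first(candidates: &[u32], test: impl Fn(u32) -> bool) -> Option<u32> {
    candidates.iter().copied().find(|&r| test(r))
}

fn blame_region(candidates: &[u32], live: &[u32], unnameable: &[u32]) -> u32 {
    // 1. Prefer a region that is live at the point of interest.
    find_first(candidates, |r| live.contains(&r))
        // 2. Otherwise, a placeholder the source region cannot name.
        .or_else(|| find_first(candidates, |r| unnameable.contains(&r)))
        // 3. Otherwise fall back to the first candidate at all.
        .or_else(|| candidates.first().copied())
        .expect("at least one candidate region")
}

fn main() {
    assert_eq!(blame_region(&[1, 2, 3], &[2], &[3]), 2);
    assert_eq!(blame_region(&[1, 2, 3], &[], &[3]), 3);
    assert_eq!(blame_region(&[1, 2, 3], &[], &[]), 1);
}
```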
@@ -609,34 +713,30 @@ impl<'tcx> RegionInferenceContext<'tcx> { fr1: RegionVid, fr2: RegionVid, ) -> (ConstraintCategory, Span) { - let (category, _, span) = self.best_blame_constraint(mir, fr1, |r| r == fr2); + let (category, _, span) = + self.best_blame_constraint(mir, fr1, |r| self.provides_universal_region(r, fr1, fr2)); (category, span) } fn retrieve_closure_constraint_info( &self, mir: &Mir<'tcx>, - constraint: &OutlivesConstraint + constraint: &OutlivesConstraint, ) -> (ConstraintCategory, bool, Span) { let loc = match constraint.locations { Locations::All(span) => return (constraint.category, false, span), Locations::Single(loc) => loc, }; - let opt_span_category = self - .closure_bounds_mapping[&loc] - .get(&(constraint.sup, constraint.sub)); + let opt_span_category = + self.closure_bounds_mapping[&loc].get(&(constraint.sup, constraint.sub)); opt_span_category .map(|&(category, span)| (category, true, span)) .unwrap_or((constraint.category, false, mir.source_info(loc).span)) } /// Returns `true` if a closure is inferred to be an `FnMut` closure. - crate fn is_closure_fn_mut( - &self, - infcx: &InferCtxt<'_, '_, 'tcx>, - fr: RegionVid, - ) -> bool { + crate fn is_closure_fn_mut(&self, infcx: &InferCtxt<'_, '_, 'tcx>, fr: RegionVid) -> bool { if let Some(ty::ReFree(free_region)) = self.to_error_region(fr) { if let ty::BoundRegion::BrEnv = free_region.bound_region { if let DefiningTy::Closure(def_id, substs) = self.universal_regions.defining_ty { @@ -648,4 +748,24 @@ impl<'tcx> RegionInferenceContext<'tcx> { false } + + /// If `r2` represents a placeholder region, then this returns + /// true if `r1` cannot name that placeholder in its + /// value. Otherwise, returns false. + fn cannot_name_placeholder(&self, r1: RegionVid, r2: RegionVid) -> bool { + debug!("cannot_name_value_of(r1={:?}, r2={:?})", r1, r2); + + match self.definitions[r2].origin { + NLLRegionVariableOrigin::Placeholder(placeholder) => { + let universe1 = self.definitions[r1].universe; + debug!( + "cannot_name_value_of: universe1={:?} placeholder={:?}", + universe1, placeholder + ); + universe1.cannot_name(placeholder.universe) + } + + NLLRegionVariableOrigin::FreeRegion | NLLRegionVariableOrigin::Existential => false, + } + } } diff --git a/src/librustc_mir/borrow_check/nll/region_infer/error_reporting/region_name.rs b/src/librustc_mir/borrow_check/nll/region_infer/error_reporting/region_name.rs index 2b671891fc..a32fb0503a 100644 --- a/src/librustc_mir/borrow_check/nll/region_infer/error_reporting/region_name.rs +++ b/src/librustc_mir/borrow_check/nll/region_infer/error_reporting/region_name.rs @@ -277,8 +277,7 @@ impl<'tcx> RegionInferenceContext<'tcx> { | ty::RePlaceholder(..) | ty::ReEmpty | ty::ReErased - | ty::ReClosureBound(..) - | ty::ReCanonical(..) => None, + | ty::ReClosureBound(..) => None, } } @@ -688,22 +687,24 @@ impl<'tcx> RegionInferenceContext<'tcx> { let mir_node_id = tcx.hir.as_local_node_id(mir_def_id).expect("non-local mir"); - let (return_span, mir_description) = - if let hir::ExprKind::Closure(_, _, _, span, gen_move) = - tcx.hir.expect_expr(mir_node_id).node - { - ( - tcx.sess.source_map().end_point(span), - if gen_move.is_some() { - " of generator" - } else { - " of closure" - }, - ) - } else { - // unreachable? - (mir.span, "") - }; + let (return_span, mir_description) = match tcx.hir.get(mir_node_id) { + hir::Node::Expr(hir::Expr { + node: hir::ExprKind::Closure(_, _, _, span, gen_move), + .. 
+ }) => ( + tcx.sess.source_map().end_point(*span), + if gen_move.is_some() { + " of generator" + } else { + " of closure" + }, + ), + hir::Node::ImplItem(hir::ImplItem { + node: hir::ImplItemKind::Method(method_sig, _), + .. + }) => (method_sig.decl.output.span(), ""), + _ => (mir.span, ""), + }; Some(RegionName { // This counter value will already have been used, so this function will increment it diff --git a/src/librustc_mir/borrow_check/nll/region_infer/mod.rs b/src/librustc_mir/borrow_check/nll/region_infer/mod.rs index 82e0b3495d..fbde699264 100644 --- a/src/librustc_mir/borrow_check/nll/region_infer/mod.rs +++ b/src/librustc_mir/borrow_check/nll/region_infer/mod.rs @@ -345,6 +345,13 @@ impl<'tcx> RegionInferenceContext<'tcx> { if scc_universe.can_name(placeholder.universe) { self.scc_values.add_element(scc, placeholder); } else { + debug!( + "init_free_and_bound_regions: placeholder {:?} is \ + not compatible with universe {:?} of its SCC {:?}", + placeholder, + scc_universe, + scc, + ); self.add_incompatible_universe(scc); } } @@ -471,6 +478,9 @@ impl<'tcx> RegionInferenceContext<'tcx> { let mut constraints: Vec<_> = self.constraints.iter().collect(); constraints.sort(); constraints + .into_iter() + .map(|c| (c, self.constraint_sccs.scc(c.sup), self.constraint_sccs.scc(c.sub))) + .collect::>() }); // To propagate constraints, we walk the DAG induced by the @@ -560,6 +570,8 @@ impl<'tcx> RegionInferenceContext<'tcx> { /// `'a` with `'b` and not `'static`. But it will have to do for /// now. fn add_incompatible_universe(&mut self, scc: ConstraintSccIndex) { + debug!("add_incompatible_universe(scc={:?})", scc); + let fr_static = self.universal_regions.fr_static; self.scc_values.add_all_points(scc); self.scc_values.add_element(scc, fr_static); @@ -1218,7 +1230,7 @@ impl<'tcx> RegionInferenceContext<'tcx> { mir: &Mir<'tcx>, _mir_def_id: DefId, longer_fr: RegionVid, - placeholder: ty::Placeholder, + placeholder: ty::PlaceholderRegion, ) { debug!( "check_bound_universal_region(fr={:?}, placeholder={:?})", @@ -1226,6 +1238,10 @@ impl<'tcx> RegionInferenceContext<'tcx> { ); let longer_fr_scc = self.constraint_sccs.scc(longer_fr); + debug!( + "check_bound_universal_region: longer_fr_scc={:?}", + longer_fr_scc, + ); // If we have some bound universal region `'a`, then the only // elements it can contain is itself -- we don't know anything @@ -1242,6 +1258,7 @@ impl<'tcx> RegionInferenceContext<'tcx> { Some(v) => v, None => return, }; + debug!("check_bound_universal_region: error_element = {:?}", error_element); // Find the region that introduced this `error_element`. let error_region = match error_element { diff --git a/src/librustc_mir/borrow_check/nll/region_infer/values.rs b/src/librustc_mir/borrow_check/nll/region_infer/values.rs index 3607ae4f50..69e2c896d3 100644 --- a/src/librustc_mir/borrow_check/nll/region_infer/values.rs +++ b/src/librustc_mir/borrow_check/nll/region_infer/values.rs @@ -112,7 +112,7 @@ impl RegionValueElements { } = self.to_location(index); if statement_index == 0 { // If this is a basic block head, then the predecessors are - // the the terminators of other basic blocks + // the terminators of other basic blocks stack.extend( mir.predecessors_for(block) .iter() @@ -150,7 +150,7 @@ crate enum RegionElement { /// A placeholder (e.g., instantiated from a `for<'a> fn(&'a u32)` /// type). 
- PlaceholderRegion(ty::Placeholder), + PlaceholderRegion(ty::PlaceholderRegion), } /// When we initially compute liveness, we use a bit matrix storing @@ -219,17 +219,17 @@ impl LivenessValues { } } -/// Maps from `ty::Placeholder` values that are used in the rest of +/// Maps from `ty::PlaceholderRegion` values that are used in the rest of /// rustc to the internal `PlaceholderIndex` values that are used in /// NLL. #[derive(Default)] crate struct PlaceholderIndices { - to_index: FxHashMap, - from_index: IndexVec, + to_index: FxHashMap, + from_index: IndexVec, } impl PlaceholderIndices { - crate fn insert(&mut self, placeholder: ty::Placeholder) -> PlaceholderIndex { + crate fn insert(&mut self, placeholder: ty::PlaceholderRegion) -> PlaceholderIndex { let PlaceholderIndices { to_index, from_index, @@ -239,11 +239,11 @@ impl PlaceholderIndices { .or_insert_with(|| from_index.push(placeholder)) } - crate fn lookup_index(&self, placeholder: ty::Placeholder) -> PlaceholderIndex { + crate fn lookup_index(&self, placeholder: ty::PlaceholderRegion) -> PlaceholderIndex { self.to_index[&placeholder] } - crate fn lookup_placeholder(&self, placeholder: PlaceholderIndex) -> ty::Placeholder { + crate fn lookup_placeholder(&self, placeholder: PlaceholderIndex) -> ty::PlaceholderRegion { self.from_index[placeholder] } @@ -375,7 +375,7 @@ impl RegionValues { crate fn placeholders_contained_in<'a>( &'a self, r: N, - ) -> impl Iterator + 'a { + ) -> impl Iterator + 'a { self.placeholders .row(r) .into_iter() @@ -432,7 +432,7 @@ impl ToElementIndex for RegionVid { } } -impl ToElementIndex for ty::Placeholder { +impl ToElementIndex for ty::PlaceholderRegion { fn add_to_row(self, values: &mut RegionValues, row: N) -> bool { let index = values.placeholder_indices.lookup_index(self); values.placeholders.insert(row, index) diff --git a/src/librustc_mir/borrow_check/nll/renumber.rs b/src/librustc_mir/borrow_check/nll/renumber.rs index 363afb87ed..e9f749ac09 100644 --- a/src/librustc_mir/borrow_check/nll/renumber.rs +++ b/src/librustc_mir/borrow_check/nll/renumber.rs @@ -10,7 +10,7 @@ use rustc::ty::subst::Substs; use rustc::ty::{self, ClosureSubsts, GeneratorSubsts, Ty, TypeFoldable}; -use rustc::mir::{BasicBlock, Location, Mir, Statement, StatementKind, UserTypeAnnotation}; +use rustc::mir::{Location, Mir, UserTypeAnnotation}; use rustc::mir::visit::{MutVisitor, TyContext}; use rustc::infer::{InferCtxt, NLLRegionVariableOrigin}; @@ -119,16 +119,4 @@ impl<'a, 'gcx, 'tcx> MutVisitor<'tcx> for NLLVisitor<'a, 'gcx, 'tcx> { debug!("visit_closure_substs: substs={:?}", substs); } - - fn visit_statement( - &mut self, - block: BasicBlock, - statement: &mut Statement<'tcx>, - location: Location, - ) { - if let StatementKind::EndRegion(_) = statement.kind { - statement.kind = StatementKind::Nop; - } - self.super_statement(block, statement, location); - } } diff --git a/src/librustc_mir/borrow_check/nll/type_check/constraint_conversion.rs b/src/librustc_mir/borrow_check/nll/type_check/constraint_conversion.rs index 994f20a011..35ec478143 100644 --- a/src/librustc_mir/borrow_check/nll/type_check/constraint_conversion.rs +++ b/src/librustc_mir/borrow_check/nll/type_check/constraint_conversion.rs @@ -8,21 +8,23 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
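`PlaceholderIndices` above is a small interner: a forward hash map plus a dense reverse table, so each `ty::PlaceholderRegion` maps to exactly one `PlaceholderIndex`. A sketch of that pattern using `String` and `usize` in place of the real types:

```
// Editor's sketch: the interning pattern behind `PlaceholderIndices`
// (a forward hash map plus a dense reverse table), using `String` and
// `usize` in place of `ty::PlaceholderRegion` and `PlaceholderIndex`.
use std::collections::HashMap;

#[derive(Default)]
struct Interner {
    to_index: HashMap<String, usize>,
    from_index: Vec<String>,
}

impl Interner {
    /// Return the existing index for `value`, or assign the next free one.
    fn insert(&mut self, value: &str) -> usize {
        let from_index = &mut self.from_index;
        *self.to_index.entry(value.to_string()).or_insert_with(|| {
            from_index.push(value.to_string());
            from_index.len() - 1
        })
    }

    fn lookup_index(&self, value: &str) -> usize {
        self.to_index[value]
    }

    fn lookup_value(&self, index: usize) -> &str {
        &self.from_index[index]
    }
}

fn main() {
    let mut interner = Interner::default();
    let a = interner.insert("'a");
    assert_eq!(interner.insert("'a"), a); // interning is idempotent
    assert_eq!(interner.lookup_index("'a"), a);
    assert_eq!(interner.lookup_value(a), "'a");
}
```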
-use borrow_check::nll::constraints::{ConstraintSet, OutlivesConstraint}; +use borrow_check::nll::constraints::OutlivesConstraint; use borrow_check::nll::region_infer::TypeTest; -use borrow_check::nll::type_check::Locations; +use borrow_check::nll::type_check::{Locations, MirTypeckRegionConstraints}; use borrow_check::nll::universal_regions::UniversalRegions; +use borrow_check::nll::ToRegionVid; use rustc::infer::canonical::QueryRegionConstraint; use rustc::infer::outlives::env::RegionBoundPairs; use rustc::infer::outlives::obligations::{TypeOutlives, TypeOutlivesDelegate}; use rustc::infer::region_constraints::{GenericKind, VerifyBound}; -use rustc::infer::{self, SubregionOrigin}; +use rustc::infer::{self, InferCtxt, SubregionOrigin}; use rustc::mir::ConstraintCategory; use rustc::ty::subst::UnpackedKind; use rustc::ty::{self, TyCtxt}; use syntax_pos::DUMMY_SP; crate struct ConstraintConversion<'a, 'gcx: 'tcx, 'tcx: 'a> { + infcx: &'a InferCtxt<'a, 'gcx, 'tcx>, tcx: TyCtxt<'a, 'gcx, 'tcx>, universal_regions: &'a UniversalRegions<'tcx>, region_bound_pairs: &'a RegionBoundPairs<'tcx>, @@ -30,32 +32,30 @@ crate struct ConstraintConversion<'a, 'gcx: 'tcx, 'tcx: 'a> { param_env: ty::ParamEnv<'tcx>, locations: Locations, category: ConstraintCategory, - outlives_constraints: &'a mut ConstraintSet, - type_tests: &'a mut Vec>, + constraints: &'a mut MirTypeckRegionConstraints<'tcx>, } impl<'a, 'gcx, 'tcx> ConstraintConversion<'a, 'gcx, 'tcx> { crate fn new( - tcx: TyCtxt<'a, 'gcx, 'tcx>, + infcx: &'a InferCtxt<'a, 'gcx, 'tcx>, universal_regions: &'a UniversalRegions<'tcx>, region_bound_pairs: &'a RegionBoundPairs<'tcx>, implicit_region_bound: Option>, param_env: ty::ParamEnv<'tcx>, locations: Locations, category: ConstraintCategory, - outlives_constraints: &'a mut ConstraintSet, - type_tests: &'a mut Vec>, + constraints: &'a mut MirTypeckRegionConstraints<'tcx>, ) -> Self { Self { - tcx, + infcx, + tcx: infcx.tcx, universal_regions, region_bound_pairs, implicit_region_bound, param_env, locations, category, - outlives_constraints, - type_tests, + constraints, } } @@ -82,9 +82,9 @@ impl<'a, 'gcx, 'tcx> ConstraintConversion<'a, 'gcx, 'tcx> { // when we move to universes, we will, and this assertion // will start to fail. 
let ty::OutlivesPredicate(k1, r2) = - query_constraint.no_late_bound_regions().unwrap_or_else(|| { + query_constraint.no_bound_vars().unwrap_or_else(|| { bug!( - "query_constraint {:?} contained bound regions", + "query_constraint {:?} contained bound vars", query_constraint, ); }); @@ -113,7 +113,7 @@ impl<'a, 'gcx, 'tcx> ConstraintConversion<'a, 'gcx, 'tcx> { } fn verify_to_type_test( - &self, + &mut self, generic_kind: GenericKind<'tcx>, region: ty::Region<'tcx>, verify_bound: VerifyBound<'tcx>, @@ -128,22 +128,30 @@ impl<'a, 'gcx, 'tcx> ConstraintConversion<'a, 'gcx, 'tcx> { } } - fn to_region_vid(&self, r: ty::Region<'tcx>) -> ty::RegionVid { - self.universal_regions.to_region_vid(r) + fn to_region_vid(&mut self, r: ty::Region<'tcx>) -> ty::RegionVid { + if let ty::RePlaceholder(placeholder) = r { + self.constraints + .placeholder_region(self.infcx, *placeholder) + .to_region_vid() + } else { + self.universal_regions.to_region_vid(r) + } } fn add_outlives(&mut self, sup: ty::RegionVid, sub: ty::RegionVid) { - self.outlives_constraints.push(OutlivesConstraint { - locations: self.locations, - category: self.category, - sub, - sup, - }); + self.constraints + .outlives_constraints + .push(OutlivesConstraint { + locations: self.locations, + category: self.category, + sub, + sup, + }); } fn add_type_test(&mut self, type_test: TypeTest<'tcx>) { debug!("add_type_test(type_test={:?})", type_test); - self.type_tests.push(type_test); + self.constraints.type_tests.push(type_test); } } @@ -156,8 +164,8 @@ impl<'a, 'b, 'gcx, 'tcx> TypeOutlivesDelegate<'tcx> a: ty::Region<'tcx>, b: ty::Region<'tcx>, ) { - let b = self.universal_regions.to_region_vid(b); - let a = self.universal_regions.to_region_vid(a); + let b = self.to_region_vid(b); + let a = self.to_region_vid(a); self.add_outlives(b, a); } diff --git a/src/librustc_mir/borrow_check/nll/type_check/free_region_relations.rs b/src/librustc_mir/borrow_check/nll/type_check/free_region_relations.rs index f8c839e4d3..3d0f3d9fc7 100644 --- a/src/librustc_mir/borrow_check/nll/type_check/free_region_relations.rs +++ b/src/librustc_mir/borrow_check/nll/type_check/free_region_relations.rs @@ -271,15 +271,14 @@ impl UniversalRegionRelationsBuilder<'cx, 'gcx, 'tcx> { for data in constraint_sets { constraint_conversion::ConstraintConversion::new( - self.infcx.tcx, + self.infcx, &self.universal_regions, &self.region_bound_pairs, self.implicit_region_bound, self.param_env, Locations::All(DUMMY_SP), ConstraintCategory::Internal, - &mut self.constraints.outlives_constraints, - &mut self.constraints.type_tests, + &mut self.constraints, ).convert_all(&data); } diff --git a/src/librustc_mir/borrow_check/nll/type_check/input_output.rs b/src/librustc_mir/borrow_check/nll/type_check/input_output.rs index ab4ee3a4ad..85ea39e538 100644 --- a/src/librustc_mir/borrow_check/nll/type_check/input_output.rs +++ b/src/librustc_mir/borrow_check/nll/type_check/input_output.rs @@ -62,7 +62,7 @@ impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> { // "inside" the closure. 
Some( self.infcx - .replace_late_bound_regions_with_fresh_var( + .replace_bound_vars_with_fresh_vars( mir.span, LateBoundRegionConversionTime::FnCall, &poly_sig, diff --git a/src/librustc_mir/borrow_check/nll/type_check/mod.rs b/src/librustc_mir/borrow_check/nll/type_check/mod.rs index f1ebddfd6d..5f64dfd931 100644 --- a/src/librustc_mir/borrow_check/nll/type_check/mod.rs +++ b/src/librustc_mir/borrow_check/nll/type_check/mod.rs @@ -16,6 +16,7 @@ use borrow_check::location::LocationTable; use borrow_check::nll::constraints::{ConstraintSet, OutlivesConstraint}; use borrow_check::nll::facts::AllFacts; use borrow_check::nll::region_infer::values::LivenessValues; +use borrow_check::nll::region_infer::values::PlaceholderIndex; use borrow_check::nll::region_infer::values::PlaceholderIndices; use borrow_check::nll::region_infer::values::RegionValueElements; use borrow_check::nll::region_infer::{ClosureRegionRequirementsExt, TypeTest}; @@ -28,11 +29,12 @@ use borrow_check::nll::ToRegionVid; use dataflow::move_paths::MoveData; use dataflow::FlowAtLocation; use dataflow::MaybeInitializedPlaces; +use either::Either; use rustc::hir; use rustc::hir::def_id::DefId; use rustc::infer::canonical::QueryRegionConstraint; use rustc::infer::outlives::env::RegionBoundPairs; -use rustc::infer::{InferCtxt, InferOk, LateBoundRegionConversionTime}; +use rustc::infer::{InferCtxt, InferOk, LateBoundRegionConversionTime, NLLRegionVariableOrigin}; use rustc::mir::interpret::EvalErrorKind::BoundsCheck; use rustc::mir::tcx::PlaceTy; use rustc::mir::visit::{PlaceContext, Visitor, MutatingUseContext, NonMutatingUseContext}; @@ -44,14 +46,14 @@ use rustc::traits::{ObligationCause, PredicateObligations}; use rustc::ty::fold::TypeFoldable; use rustc::ty::subst::{Subst, Substs, UnpackedKind}; use rustc::ty::{self, RegionVid, ToPolyTraitRef, Ty, TyCtxt, TyKind}; +use rustc_data_structures::fx::{FxHashMap, FxHashSet}; +use rustc_data_structures::indexed_vec::{IndexVec, Idx}; +use rustc::ty::layout::VariantIdx; use std::rc::Rc; use std::{fmt, iter}; use syntax_pos::{Span, DUMMY_SP}; use transform::{MirPass, MirSource}; -use either::Either; -use rustc_data_structures::fx::{FxHashMap, FxHashSet}; - macro_rules! 
span_mirbug { ($context:expr, $elem:expr, $($message:tt)*) => ({ $crate::borrow_check::nll::type_check::mirbug( @@ -110,7 +112,7 @@ mod relate_tys; /// - `liveness` -- results of a liveness computation on the MIR; used to create liveness /// constraints for the regions in the types of variables /// - `flow_inits` -- results of a maybe-init dataflow analysis -/// - `move_data` -- move-data constructed when performing the maybe-init dataflow analysiss +/// - `move_data` -- move-data constructed when performing the maybe-init dataflow analysis pub(crate) fn type_check<'gcx, 'tcx>( infcx: &InferCtxt<'_, 'gcx, 'tcx>, param_env: ty::ParamEnv<'gcx>, @@ -126,12 +128,13 @@ pub(crate) fn type_check<'gcx, 'tcx>( ) -> MirTypeckResults<'tcx> { let implicit_region_bound = infcx.tcx.mk_region(ty::ReVar(universal_regions.fr_fn_body)); let mut constraints = MirTypeckRegionConstraints { + placeholder_indices: PlaceholderIndices::default(), + placeholder_index_to_region: IndexVec::default(), liveness_constraints: LivenessValues::new(elements), outlives_constraints: ConstraintSet::default(), closure_bounds_mapping: Default::default(), type_tests: Vec::default(), }; - let mut placeholder_indices = PlaceholderIndices::default(); let CreateResult { universal_region_relations, @@ -151,7 +154,6 @@ pub(crate) fn type_check<'gcx, 'tcx>( borrow_set, all_facts, constraints: &mut constraints, - placeholder_indices: &mut placeholder_indices, }; type_check_internal( @@ -175,7 +177,6 @@ pub(crate) fn type_check<'gcx, 'tcx>( MirTypeckResults { constraints, - placeholder_indices, universal_region_relations, } } @@ -359,7 +360,7 @@ impl<'a, 'b, 'gcx, 'tcx> TypeVerifier<'a, 'b, 'gcx, 'tcx> { } fn sanitize_type(&mut self, parent: &dyn fmt::Debug, ty: Ty<'tcx>) -> Ty<'tcx> { - if ty.has_escaping_regions() || ty.references_error() { + if ty.has_escaping_bound_vars() || ty.references_error() { span_mirbug_and_err!(self, parent, "bad type {:?}", ty) } else { ty @@ -574,7 +575,7 @@ impl<'a, 'b, 'gcx, 'tcx> TypeVerifier<'a, 'b, 'gcx, 'tcx> { }, ProjectionElem::Downcast(adt_def1, index) => match base_ty.sty { ty::Adt(adt_def, substs) if adt_def.is_enum() && adt_def == adt_def1 => { - if index >= adt_def.variants.len() { + if index.as_usize() >= adt_def.variants.len() { PlaceTy::Ty { ty: span_mirbug_and_err!( self, @@ -654,7 +655,8 @@ impl<'a, 'b, 'gcx, 'tcx> TypeVerifier<'a, 'b, 'gcx, 'tcx> { variant_index, } => (&adt_def.variants[variant_index], substs), PlaceTy::Ty { ty } => match ty.sty { - ty::Adt(adt_def, substs) if !adt_def.is_enum() => (&adt_def.variants[0], substs), + ty::Adt(adt_def, substs) if !adt_def.is_enum() => + (&adt_def.variants[VariantIdx::new(0)], substs), ty::Closure(def_id, substs) => { return match substs.upvar_tys(def_id, tcx).nth(field.index()) { Some(ty) => Ok(ty), @@ -730,18 +732,30 @@ struct BorrowCheckContext<'a, 'tcx: 'a> { all_facts: &'a mut Option, borrow_set: &'a BorrowSet<'tcx>, constraints: &'a mut MirTypeckRegionConstraints<'tcx>, - placeholder_indices: &'a mut PlaceholderIndices, } crate struct MirTypeckResults<'tcx> { crate constraints: MirTypeckRegionConstraints<'tcx>, - crate placeholder_indices: PlaceholderIndices, crate universal_region_relations: Rc>, } /// A collection of region constraints that must be satisfied for the /// program to be considered well-typed. crate struct MirTypeckRegionConstraints<'tcx> { + /// Maps from a `ty::Placeholder` to the corresponding + /// `PlaceholderIndex` bit that we will use for it. 
+ /// + /// To keep everything in sync, do not insert this set + /// directly. Instead, use the `placeholder_region` helper. + crate placeholder_indices: PlaceholderIndices, + + /// Each time we add a placeholder to `placeholder_indices`, we + /// also create a corresponding "representative" region vid for + /// that wraps it. This vector tracks those. This way, when we + /// convert the same `ty::RePlaceholder(p)` twice, we can map to + /// the same underlying `RegionVid`. + crate placeholder_index_to_region: IndexVec>, + /// In general, the type-checker is not responsible for enforcing /// liveness constraints; this job falls to the region inferencer, /// which performs a liveness analysis. However, in some limited @@ -759,6 +773,25 @@ crate struct MirTypeckRegionConstraints<'tcx> { crate type_tests: Vec>, } +impl MirTypeckRegionConstraints<'tcx> { + fn placeholder_region( + &mut self, + infcx: &InferCtxt<'_, '_, 'tcx>, + placeholder: ty::PlaceholderRegion, + ) -> ty::Region<'tcx> { + let placeholder_index = self.placeholder_indices.insert(placeholder); + match self.placeholder_index_to_region.get(placeholder_index) { + Some(&v) => v, + None => { + let origin = NLLRegionVariableOrigin::Placeholder(placeholder); + let region = infcx.next_nll_region_var_in_universe(origin, placeholder.universe); + self.placeholder_index_to_region.push(region); + region + } + } + } +} + /// The `Locations` type summarizes *where* region constraints are /// required to hold. Normally, this is at a particular point which /// created the obligation, but for constraints that the user gave, we @@ -888,15 +921,14 @@ impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> { if let Some(ref mut borrowck_context) = self.borrowck_context { constraint_conversion::ConstraintConversion::new( - self.infcx.tcx, + self.infcx, borrowck_context.universal_regions, self.region_bound_pairs, self.implicit_region_bound, self.param_env, locations, category, - &mut borrowck_context.constraints.outlives_constraints, - &mut borrowck_context.constraints.type_tests, + &mut borrowck_context.constraints, ).convert_all(&data); } } @@ -1250,7 +1282,7 @@ impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> { ); } }; - if variant_index >= adt.variants.len() { + if variant_index.as_usize() >= adt.variants.len() { span_bug!( stmt.source_info.span, "bad set discriminant ({:?} = {:?}): value of of range", @@ -1279,11 +1311,11 @@ impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> { } } StatementKind::FakeRead(..) - | StatementKind::StorageLive(_) - | StatementKind::StorageDead(_) + | StatementKind::StorageLive(..) + | StatementKind::StorageDead(..) | StatementKind::InlineAsm { .. } - | StatementKind::EndRegion(_) - | StatementKind::Validate(..) + | StatementKind::Retag { .. } + | StatementKind::EscapeToRaw { .. 
} | StatementKind::Nop => {} } } @@ -1374,7 +1406,7 @@ impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> { return; } }; - let (sig, map) = self.infcx.replace_late_bound_regions_with_fresh_var( + let (sig, map) = self.infcx.replace_bound_vars_with_fresh_vars( term.source_info.span, LateBoundRegionConversionTime::FnCall, &sig, @@ -2203,8 +2235,8 @@ impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> { .enumerate() .filter_map(|(idx, constraint)| { let ty::OutlivesPredicate(k1, r2) = - constraint.no_late_bound_regions().unwrap_or_else(|| { - bug!("query_constraint {:?} contained bound regions", constraint,); + constraint.no_bound_vars().unwrap_or_else(|| { + bug!("query_constraint {:?} contained bound vars", constraint,); }); match k1.unpack() { diff --git a/src/librustc_mir/borrow_check/nll/type_check/relate_tys.rs b/src/librustc_mir/borrow_check/nll/type_check/relate_tys.rs index 13ebf46bdb..225e2841fb 100644 --- a/src/librustc_mir/borrow_check/nll/type_check/relate_tys.rs +++ b/src/librustc_mir/borrow_check/nll/type_check/relate_tys.rs @@ -10,10 +10,11 @@ use borrow_check::nll::constraints::OutlivesConstraint; use borrow_check::nll::type_check::{BorrowCheckContext, Locations}; -use rustc::infer::nll_relate::{TypeRelating, TypeRelatingDelegate}; +use rustc::infer::nll_relate::{TypeRelating, TypeRelatingDelegate, NormalizationStrategy}; use rustc::infer::{InferCtxt, NLLRegionVariableOrigin}; use rustc::mir::ConstraintCategory; use rustc::traits::query::Fallible; +use rustc::traits::DomainGoal; use rustc::ty::relate::TypeRelation; use rustc::ty::{self, Ty}; @@ -38,7 +39,7 @@ pub(super) fn relate_types<'tcx>( TypeRelating::new( infcx, NllTypeRelatingDelegate::new(infcx, borrowck_context, locations, category), - v, + v ).relate(&a, &b)?; Ok(()) } @@ -76,16 +77,23 @@ impl TypeRelatingDelegate<'tcx> for NllTypeRelatingDelegate<'_, '_, '_, 'tcx> { } fn next_existential_region_var(&mut self) -> ty::Region<'tcx> { - let origin = NLLRegionVariableOrigin::Existential; - self.infcx.next_nll_region_var(origin) + if let Some(_) = &mut self.borrowck_context { + let origin = NLLRegionVariableOrigin::Existential; + self.infcx.next_nll_region_var(origin) + } else { + self.infcx.tcx.types.re_erased + } } - fn next_placeholder_region(&mut self, placeholder: ty::Placeholder) -> ty::Region<'tcx> { - let origin = NLLRegionVariableOrigin::Placeholder(placeholder); + fn next_placeholder_region( + &mut self, + placeholder: ty::PlaceholderRegion + ) -> ty::Region<'tcx> { if let Some(borrowck_context) = &mut self.borrowck_context { - borrowck_context.placeholder_indices.insert(placeholder); + borrowck_context.constraints.placeholder_region(self.infcx, placeholder) + } else { + self.infcx.tcx.types.re_erased } - self.infcx.next_nll_region_var(origin) } fn generalize_existential(&mut self, universe: ty::UniverseIndex) -> ty::Region<'tcx> { @@ -108,4 +116,16 @@ impl TypeRelatingDelegate<'tcx> for NllTypeRelatingDelegate<'_, '_, '_, 'tcx> { }); } } + + fn push_domain_goal(&mut self, _: DomainGoal<'tcx>) { + bug!("should never be invoked with eager normalization") + } + + fn normalization() -> NormalizationStrategy { + NormalizationStrategy::Eager + } + + fn forbid_inference_vars() -> bool { + true + } } diff --git a/src/librustc_mir/borrow_check/places_conflict.rs b/src/librustc_mir/borrow_check/places_conflict.rs index c0f059619a..715d6e0c0d 100644 --- a/src/librustc_mir/borrow_check/places_conflict.rs +++ b/src/librustc_mir/borrow_check/places_conflict.rs @@ -262,7 +262,7 @@ struct PlaceComponents<'p, 'tcx: 'p> { impl<'p, 
'tcx> PlaceComponents<'p, 'tcx> { /// Converts a list of `Place` components into an iterator; this /// iterator yields up a never-ending stream of `Option<&Place>`. - /// These begin with the "innermst" place and then with each + /// These begin with the "innermost" place and then with each /// projection therefrom. So given a place like `a.b.c` it would /// yield up: /// diff --git a/src/librustc_mir/borrow_check/used_muts.rs b/src/librustc_mir/borrow_check/used_muts.rs index dad87ed65a..7c75fb5991 100644 --- a/src/librustc_mir/borrow_check/used_muts.rs +++ b/src/librustc_mir/borrow_check/used_muts.rs @@ -9,43 +9,113 @@ // except according to those terms. use rustc::mir::visit::{PlaceContext, Visitor}; -use rustc::mir::{Local, Location, Place}; +use rustc::mir::{BasicBlock, Local, Location, Place, Statement, StatementKind, TerminatorKind}; use rustc_data_structures::fx::FxHashSet; use borrow_check::MirBorrowckCtxt; impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { - /// Walks the MIR looking for assignments to a set of locals, as part of the unused mutable - /// local variables lint, to update the context's `used_mut` in a single walk. - crate fn gather_used_muts(&mut self, locals: FxHashSet<Local>) { - let mut visitor = GatherUsedMutsVisitor { - needles: locals, - mbcx: self, - }; - visitor.visit_mir(visitor.mbcx.mir); + /// Walks the MIR adding to the set of `used_mut` locals that will be ignored for the purposes + /// of the `unused_mut` lint. + /// + /// `temporary_used_locals` should contain locals that were found to be temporary, mutable and + /// used from borrow checking. This function looks for assignments into these locals from + /// user-declared locals and adds those user-defined locals to the `used_mut` set. This can + /// occur due to a rare case involving upvars in closures. + /// + /// `never_initialized_mut_locals` should contain the set of user-declared mutable locals + /// (not arguments) that have not already been marked as being used. + /// This function then looks for assignments from statements or the terminator into the locals + /// from this set and removes them from the set. This leaves only those locals that have not + /// been assigned to - this set is used as a proxy for locals that were not initialized due to + /// unreachable code. These locals are then considered "used" to silence the lint for them. + /// See #55344 for context. + crate fn gather_used_muts( + &mut self, + temporary_used_locals: FxHashSet<Local>, + mut never_initialized_mut_locals: FxHashSet<Local>, + ) { + { + let mut visitor = GatherUsedMutsVisitor { + temporary_used_locals, + never_initialized_mut_locals: &mut never_initialized_mut_locals, + mbcx: self, + }; + visitor.visit_mir(visitor.mbcx.mir); + } + + // Take the union of the existing `used_mut` set with those variables we've found were + // never initialized. + debug!("gather_used_muts: never_initialized_mut_locals={:?}", never_initialized_mut_locals); + self.used_mut = self.used_mut.union(&never_initialized_mut_locals).cloned().collect(); + } } -/// MIR visitor gathering the assignments to a set of locals, in a single walk. -/// 'visit = the duration of the MIR walk +/// MIR visitor for collecting used mutable variables. +/// The 'visit lifetime represents the duration of the MIR walk.
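As a rough illustration of the unreachable-initialization pattern described in the `gather_used_muts` documentation above (the exact reproducer in #55344 may differ, this sketch only shows the shape of the problem): a `mut` local whose only assignments sit in dead code is never initialized at runtime, and the change treats such locals as "used" so the `unused_mut` lint stays quiet.

```rust
#![allow(unreachable_code, unused_assignments, unused_variables)]

fn early_exit() {
    let mut x: u32;
    return;
    // Dead code below: `x` is never initialized at runtime, yet this code is the
    // only justification for the `mut`. Locals like this are added to `used_mut`
    // so the lint does not fire on code the user cannot sensibly "fix".
    x = 0;
    x = 1;
}

fn main() {
    early_exit();
}
```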
struct GatherUsedMutsVisitor<'visit, 'cx: 'visit, 'gcx: 'tcx, 'tcx: 'cx> { - needles: FxHashSet, + temporary_used_locals: FxHashSet, + never_initialized_mut_locals: &'visit mut FxHashSet, mbcx: &'visit mut MirBorrowckCtxt<'cx, 'gcx, 'tcx>, } impl<'visit, 'cx, 'gcx, 'tcx> Visitor<'tcx> for GatherUsedMutsVisitor<'visit, 'cx, 'gcx, 'tcx> { + fn visit_terminator_kind( + &mut self, + _block: BasicBlock, + kind: &TerminatorKind<'tcx>, + _location: Location, + ) { + debug!("visit_terminator_kind: kind={:?}", kind); + match &kind { + TerminatorKind::Call { destination: Some((into, _)), .. } => { + if let Some(local) = into.base_local() { + debug!( + "visit_terminator_kind: kind={:?} local={:?} \ + never_initialized_mut_locals={:?}", + kind, local, self.never_initialized_mut_locals + ); + let _ = self.never_initialized_mut_locals.remove(&local); + } + }, + _ => {}, + } + } + + fn visit_statement( + &mut self, + _block: BasicBlock, + statement: &Statement<'tcx>, + _location: Location, + ) { + match &statement.kind { + StatementKind::Assign(into, _) => { + // Remove any locals that we found were initialized from the + // `never_initialized_mut_locals` set. At the end, the only remaining locals will + // be those that were never initialized - we will consider those as being used as + // they will either have been removed by unreachable code optimizations; or linted + // as unused variables. + if let Some(local) = into.base_local() { + debug!( + "visit_statement: statement={:?} local={:?} \ + never_initialized_mut_locals={:?}", + statement, local, self.never_initialized_mut_locals + ); + let _ = self.never_initialized_mut_locals.remove(&local); + } + }, + _ => {}, + } + } + fn visit_local( &mut self, local: &Local, place_context: PlaceContext<'tcx>, location: Location, ) { - if !self.needles.contains(local) { - return; - } - - if place_context.is_place_assignment() { + if place_context.is_place_assignment() && self.temporary_used_locals.contains(local) { // Propagate the Local assigned at this Location as a used mutable local variable for moi in &self.mbcx.move_data.loc_map[location] { let mpi = &self.mbcx.move_data.moves[*moi].path; diff --git a/src/librustc_mir/build/block.rs b/src/librustc_mir/build/block.rs index aa383a123b..2ef71617b7 100644 --- a/src/librustc_mir/build/block.rs +++ b/src/librustc_mir/build/block.rs @@ -90,7 +90,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { let source_info = this.source_info(span); for stmt in stmts { - let Stmt { kind, opt_destruction_scope } = this.hir.mirror(stmt); + let Stmt { kind, opt_destruction_scope, span: stmt_span } = this.hir.mirror(stmt); match kind { StmtKind::Expr { scope, expr } => { this.block_context.push(BlockFrame::Statement { ignores_expr_result: true }); @@ -99,7 +99,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { let si = (scope, source_info); this.in_scope(si, LintLevel::Inherited, block, |this| { let expr = this.hir.mirror(expr); - this.stmt_expr(block, expr) + this.stmt_expr(block, expr, Some(stmt_span)) }) })); } @@ -177,17 +177,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { let destination_ty = destination.ty(&this.local_decls, tcx).to_ty(tcx); if let Some(expr) = expr { let tail_result_is_ignored = destination_ty.is_unit() || - match this.block_context.last() { - // no context: conservatively assume result is read - None => false, - - // sub-expression: block result feeds into some computation - Some(BlockFrame::SubExpr) => false, - - // otherwise: use accumualated is_ignored state. 
- Some(BlockFrame::TailExpr { tail_result_is_ignored: ignored }) | - Some(BlockFrame::Statement { ignores_expr_result: ignored }) => *ignored, - }; + this.block_context.currently_ignores_tail_results(); this.block_context.push(BlockFrame::TailExpr { tail_result_is_ignored }); unpack!(block = this.into(destination, block, expr)); diff --git a/src/librustc_mir/build/cfg.rs b/src/librustc_mir/build/cfg.rs index 619ebb1675..2efb75c232 100644 --- a/src/librustc_mir/build/cfg.rs +++ b/src/librustc_mir/build/cfg.rs @@ -14,9 +14,7 @@ //! Routines for manipulating the control-flow graph. use build::CFG; -use rustc::middle::region; use rustc::mir::*; -use rustc::ty::TyCtxt; impl<'tcx> CFG<'tcx> { pub fn block_data(&self, blk: BasicBlock) -> &BasicBlockData<'tcx> { @@ -45,30 +43,6 @@ impl<'tcx> CFG<'tcx> { self.block_data_mut(block).statements.push(statement); } - pub fn push_end_region<'a, 'gcx:'a+'tcx>(&mut self, - tcx: TyCtxt<'a, 'gcx, 'tcx>, - block: BasicBlock, - source_info: SourceInfo, - region_scope: region::Scope) { - if tcx.emit_end_regions() { - if let region::ScopeData::CallSite = region_scope.data { - // The CallSite scope (aka the root scope) is sort of weird, in that it is - // supposed to "separate" the "interior" and "exterior" of a closure. Being - // that, it is not really a part of the region hierarchy, but for some - // reason it *is* considered a part of it. - // - // It should die a hopefully painful death with NLL, so let's leave this hack - // for now so that nobody can complain about soundness. - return - } - - self.push(block, Statement { - source_info, - kind: StatementKind::EndRegion(region_scope), - }); - } - } - pub fn push_assign(&mut self, block: BasicBlock, source_info: SourceInfo, diff --git a/src/librustc_mir/build/expr/as_place.rs b/src/librustc_mir/build/expr/as_place.rs index 77746e5538..cb3c88876a 100644 --- a/src/librustc_mir/build/expr/as_place.rs +++ b/src/librustc_mir/build/expr/as_place.rs @@ -86,6 +86,9 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { // region_scope=None so place indexes live forever. They are scalars so they // do not need storage annotations, and they are often copied between // places. + // Making this a *fresh* temporary also means we do not have to worry about + // the index changing later: Nothing will ever change this temporary. + // The "retagging" transformation (for Stacked Borrows) relies on this. let idx = unpack!(block = this.as_temp(block, None, index, Mutability::Mut)); // bounds check: diff --git a/src/librustc_mir/build/expr/as_rvalue.rs b/src/librustc_mir/build/expr/as_rvalue.rs index 8fee74390c..18ce7ae490 100644 --- a/src/librustc_mir/build/expr/as_rvalue.rs +++ b/src/librustc_mir/build/expr/as_rvalue.rs @@ -351,7 +351,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { block.and(Rvalue::Aggregate(adt, fields)) } ExprKind::Assign { .. } | ExprKind::AssignOp { .. } => { - block = unpack!(this.stmt_expr(block, expr)); + block = unpack!(this.stmt_expr(block, expr, None)); block.and(this.unit_rvalue()) } ExprKind::Yield { value } => { diff --git a/src/librustc_mir/build/expr/as_temp.rs b/src/librustc_mir/build/expr/as_temp.rs index e0bf02c673..2db9fb9cb9 100644 --- a/src/librustc_mir/build/expr/as_temp.rs +++ b/src/librustc_mir/build/expr/as_temp.rs @@ -10,7 +10,7 @@ //! 
See docs in build/expr/mod.rs -use build::{BlockAnd, BlockAndExtension, BlockFrame, Builder}; +use build::{BlockAnd, BlockAndExtension, Builder}; use hair::*; use rustc::middle::region; use rustc::mir::*; @@ -68,19 +68,9 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { debug!("creating temp {:?} with block_context: {:?}", local_decl, this.block_context); // Find out whether this temp is being created within the // tail expression of a block whose result is ignored. - for bf in this.block_context.iter().rev() { - match bf { - BlockFrame::SubExpr => continue, - BlockFrame::Statement { .. } => break, - &BlockFrame::TailExpr { tail_result_is_ignored } => { - local_decl = local_decl.block_tail(BlockTailInfo { - tail_result_is_ignored - }); - break; - } - } + if let Some(tail_info) = this.block_context.currently_in_block_tail() { + local_decl = local_decl.block_tail(tail_info); } - this.local_decls.push(local_decl) }; if !expr_ty.is_never() { @@ -95,9 +85,15 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { unpack!(block = this.into(&Place::Local(temp), block, expr)); - // In constants, temp_lifetime is None. We should not need to drop - // anything because no values with a destructor can be created in - // a constant at this time, even if the type may need dropping. + // In constants, temp_lifetime is None for temporaries that live for the + // 'static lifetime. Thus we do not drop these temporaries and simply leak them. + // This is equivalent to what `let x = &foo();` does in functions. The temporary + // is lifted to their surrounding scope. In a function that means the temporary lives + // until just before the function returns. In constants that means it outlives the + // constant's initialization value computation. Anything outliving a constant + // must have the `'static` lifetime and live forever. + // Anything with a shorter lifetime (e.g the `&foo()` in `bar(&foo())` or anything + // within a block will keep the regular drops just like runtime code. if let Some(temp_lifetime) = temp_lifetime { this.schedule_drop_storage_and_value( expr_span, diff --git a/src/librustc_mir/build/expr/into.rs b/src/librustc_mir/build/expr/into.rs index d2913872fc..8eb46a0483 100644 --- a/src/librustc_mir/build/expr/into.rs +++ b/src/librustc_mir/build/expr/into.rs @@ -351,7 +351,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { | ExprKind::Break { .. } | ExprKind::InlineAsm { .. } | ExprKind::Return { .. } => { - unpack!(block = this.stmt_expr(block, expr)); + unpack!(block = this.stmt_expr(block, expr, None)); this.cfg.push_assign_unit(block, source_info, destination); block.unit() } diff --git a/src/librustc_mir/build/expr/stmt.rs b/src/librustc_mir/build/expr/stmt.rs index d2b39f088b..45235b3153 100644 --- a/src/librustc_mir/build/expr/stmt.rs +++ b/src/librustc_mir/build/expr/stmt.rs @@ -14,7 +14,18 @@ use hair::*; use rustc::mir::*; impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { - pub fn stmt_expr(&mut self, mut block: BasicBlock, expr: Expr<'tcx>) -> BlockAnd<()> { + /// Builds a block of MIR statements to evaluate the HAIR `expr`. + /// If the original expression was an AST statement, + /// (e.g. `some().code(&here());`) then `opt_stmt_span` is the + /// span of that statement (including its semicolon, if any). + /// Diagnostics use this span (which may be larger than that of + /// `expr`) to identify when statement temporaries are dropped. 
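Since the new `opt_stmt_span` exists purely so diagnostics can point at the place where statement temporaries die, here is a small, self-contained illustration (not taken from the patch) of the drop timing being described:

```rust
// A temporary created while evaluating a statement lives until the end of that
// statement, i.e. it is dropped "at the semicolon" -- the point the builder now
// records as the span for the drop.
struct Noisy(&'static str);

impl Drop for Noisy {
    fn drop(&mut self) {
        println!("dropping {}", self.0);
    }
}

fn make() -> Noisy {
    Noisy("statement temporary")
}

fn use_it(_: &Noisy) {
    println!("using the temporary");
}

fn main() {
    use_it(&make()); // temporary dropped here, at the statement's `;`
    println!("statement finished");
}
```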
+ pub fn stmt_expr(&mut self, + mut block: BasicBlock, + expr: Expr<'tcx>, + opt_stmt_span: Option) + -> BlockAnd<()> + { let this = self; let expr_span = expr.span; let source_info = this.source_info(expr.span); @@ -29,7 +40,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { } => { let value = this.hir.mirror(value); this.in_scope((region_scope, source_info), lint_level, block, |this| { - this.stmt_expr(block, value) + this.stmt_expr(block, value, opt_stmt_span) }) } ExprKind::Assign { lhs, rhs } => { @@ -167,8 +178,12 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { .into_boxed_slice(); let inputs = inputs .into_iter() - .map(|input| unpack!(block = this.as_local_operand(block, input))) - .collect::>() + .map(|input| { + ( + input.span(), + unpack!(block = this.as_local_operand(block, input)), + ) + }).collect::>() .into_boxed_slice(); this.cfg.push( block, @@ -186,9 +201,56 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { } _ => { let expr_ty = expr.ty; - let temp = this.temp(expr.ty.clone(), expr_span); + + // Issue #54382: When creating temp for the value of + // expression like: + // + // `{ side_effects(); { let l = stuff(); the_value } }` + // + // it is usually better to focus on `the_value` rather + // than the entirety of block(s) surrounding it. + let mut temp_span = expr_span; + let mut temp_in_tail_of_block = false; + if let ExprKind::Block { body } = expr.kind { + if let Some(tail_expr) = &body.expr { + let mut expr = tail_expr; + while let rustc::hir::ExprKind::Block(subblock, _label) = &expr.node { + if let Some(subtail_expr) = &subblock.expr { + expr = subtail_expr + } else { + break; + } + } + temp_span = expr.span; + temp_in_tail_of_block = true; + } + } + + let temp = { + let mut local_decl = LocalDecl::new_temp(expr.ty.clone(), temp_span); + if temp_in_tail_of_block { + if this.block_context.currently_ignores_tail_results() { + local_decl = local_decl.block_tail(BlockTailInfo { + tail_result_is_ignored: true + }); + } + } + let temp = this.local_decls.push(local_decl); + let place = Place::Local(temp); + debug!("created temp {:?} for expr {:?} in block_context: {:?}", + temp, expr, this.block_context); + place + }; unpack!(block = this.into(&temp, block, expr)); - unpack!(block = this.build_drop(block, expr_span, temp, expr_ty)); + + // Attribute drops of the statement's temps to the + // semicolon at the statement's end. 
+ let drop_point = this.hir.tcx().sess.source_map().end_point(match opt_stmt_span { + None => expr_span, + Some(StatementSpan(span)) => span, + }); + + unpack!(block = this.build_drop(block, drop_point, temp, expr_ty)); block.unit() } } diff --git a/src/librustc_mir/build/matches/mod.rs b/src/librustc_mir/build/matches/mod.rs index b92f270255..342aaf9039 100644 --- a/src/librustc_mir/build/matches/mod.rs +++ b/src/librustc_mir/build/matches/mod.rs @@ -22,6 +22,7 @@ use hair::pattern::PatternTypeProjections; use rustc::hir; use rustc::mir::*; use rustc::ty::{self, Ty}; +use rustc::ty::layout::VariantIdx; use rustc_data_structures::bit_set::BitSet; use rustc_data_structures::fx::FxHashMap; use syntax::ast::{Name, NodeId}; @@ -663,7 +664,7 @@ enum TestKind<'tcx> { // test the branches of enum Switch { adt_def: &'tcx ty::AdtDef, - variants: BitSet, + variants: BitSet, }, // test the branches of enum diff --git a/src/librustc_mir/build/matches/simplify.rs b/src/librustc_mir/build/matches/simplify.rs index 349d877d52..cfea357334 100644 --- a/src/librustc_mir/build/matches/simplify.rs +++ b/src/librustc_mir/build/matches/simplify.rs @@ -123,7 +123,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { } PatternKind::Variant { adt_def, substs, variant_index, ref subpatterns } => { - let irrefutable = adt_def.variants.iter().enumerate().all(|(i, v)| { + let irrefutable = adt_def.variants.iter_enumerated().all(|(i, v)| { i == variant_index || { self.hir.tcx().features().never_type && self.hir.tcx().features().exhaustive_patterns && diff --git a/src/librustc_mir/build/matches/test.rs b/src/librustc_mir/build/matches/test.rs index 9e6f32909b..5d9cb014f5 100644 --- a/src/librustc_mir/build/matches/test.rs +++ b/src/librustc_mir/build/matches/test.rs @@ -22,6 +22,7 @@ use rustc_data_structures::bit_set::BitSet; use rustc_data_structures::fx::FxHashMap; use rustc::ty::{self, Ty}; use rustc::ty::util::IntTypeExt; +use rustc::ty::layout::VariantIdx; use rustc::mir::*; use rustc::hir::{RangeEnd, Mutability}; use syntax_pos::Span; @@ -30,7 +31,7 @@ use std::cmp::Ordering; impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { /// Identifies what test is needed to decide if `match_pair` is applicable. /// - /// It is a bug to call this with a simplifyable pattern. + /// It is a bug to call this with a simplifiable pattern. pub fn test<'pat>(&mut self, match_pair: &MatchPair<'pat, 'tcx>) -> Test<'tcx> { match *match_pair.pattern.kind { PatternKind::Variant { ref adt_def, substs: _, variant_index: _, subpatterns: _ } => { @@ -152,7 +153,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { pub fn add_variants_to_switch<'pat>(&mut self, test_place: &Place<'tcx>, candidate: &Candidate<'pat, 'tcx>, - variants: &mut BitSet) + variants: &mut BitSet) -> bool { let match_pair = match candidate.match_pairs.iter().find(|mp| mp.place == *test_place) { @@ -196,7 +197,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { let mut targets = Vec::with_capacity(used_variants + 1); let mut values = Vec::with_capacity(used_variants); let tcx = self.hir.tcx(); - for (idx, discr) in adt_def.discriminants(tcx).enumerate() { + for (idx, discr) in adt_def.discriminants(tcx) { target_blocks.push(if variants.contains(idx) { values.push(discr.val); targets.push(self.cfg.start_new_block()); @@ -512,7 +513,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { variant_index, subpatterns, candidate); - resulting_candidates[variant_index].push(new_candidate); + resulting_candidates[variant_index.as_usize()].push(new_candidate); true } (&TestKind::Switch { .. 
}, _) => false, @@ -673,7 +674,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { fn candidate_after_variant_switch<'pat>(&mut self, match_pair_index: usize, adt_def: &'tcx ty::AdtDef, - variant_index: usize, + variant_index: VariantIdx, subpatterns: &'pat [FieldPattern<'tcx>], candidate: &Candidate<'pat, 'tcx>) -> Candidate<'pat, 'tcx> { diff --git a/src/librustc_mir/build/matches/util.rs b/src/librustc_mir/build/matches/util.rs index cfd9100fc6..a87660db2d 100644 --- a/src/librustc_mir/build/matches/util.rs +++ b/src/librustc_mir/build/matches/util.rs @@ -13,6 +13,7 @@ use build::matches::MatchPair; use hair::*; use rustc::mir::*; use std::u32; +use std::convert::TryInto; impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { pub fn field_match_pairs<'pat>(&mut self, @@ -35,8 +36,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { opt_slice: Option<&'pat Pattern<'tcx>>, suffix: &'pat [Pattern<'tcx>]) { let min_length = prefix.len() + suffix.len(); - assert!(min_length < u32::MAX as usize); - let min_length = min_length as u32; + let min_length = min_length.try_into().unwrap(); match_pairs.extend( prefix.iter() diff --git a/src/librustc_mir/build/mod.rs b/src/librustc_mir/build/mod.rs index 5b4001f065..d95a74be77 100644 --- a/src/librustc_mir/build/mod.rs +++ b/src/librustc_mir/build/mod.rs @@ -336,6 +336,9 @@ impl BlockFrame { } } +#[derive(Debug)] +struct BlockContext(Vec); + struct Builder<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { hir: Cx<'a, 'gcx, 'tcx>, cfg: CFG<'tcx>, @@ -359,7 +362,7 @@ struct Builder<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { /// start just throwing new entries onto that vector in order to /// distinguish the context of EXPR1 from the context of EXPR2 in /// `{ STMTS; EXPR1 } + EXPR2` - block_context: Vec, + block_context: BlockContext, /// The current unsafe block in scope, even if it is hidden by /// a PushUnsafeBlock @@ -409,6 +412,55 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { } } +impl BlockContext { + fn new() -> Self { BlockContext(vec![]) } + fn push(&mut self, bf: BlockFrame) { self.0.push(bf); } + fn pop(&mut self) -> Option { self.0.pop() } + + /// Traverses the frames on the BlockContext, searching for either + /// the first block-tail expression frame with no intervening + /// statement frame. + /// + /// Notably, this skips over `SubExpr` frames; this method is + /// meant to be used in the context of understanding the + /// relationship of a temp (created within some complicated + /// expression) with its containing expression, and whether the + /// value of that *containing expression* (not the temp!) is + /// ignored. + fn currently_in_block_tail(&self) -> Option { + for bf in self.0.iter().rev() { + match bf { + BlockFrame::SubExpr => continue, + BlockFrame::Statement { .. } => break, + &BlockFrame::TailExpr { tail_result_is_ignored } => + return Some(BlockTailInfo { tail_result_is_ignored }) + } + } + + return None; + } + + /// Looks at the topmost frame on the BlockContext and reports + /// whether its one that would discard a block tail result. + /// + /// Unlike `currently_within_ignored_tail_expression`, this does + /// *not* skip over `SubExpr` frames: here, we want to know + /// whether the block result itself is discarded. + fn currently_ignores_tail_results(&self) -> bool { + match self.0.last() { + // no context: conservatively assume result is read + None => false, + + // sub-expression: block result feeds into some computation + Some(BlockFrame::SubExpr) => false, + + // otherwise: use accumulated is_ignored state. 
+ Some(BlockFrame::TailExpr { tail_result_is_ignored: ignored }) | + Some(BlockFrame::Statement { ignores_expr_result: ignored }) => *ignored, + } + } +} + #[derive(Debug)] enum LocalsForNode { /// In the usual case, a node-id for an identifier maps to at most @@ -612,7 +664,7 @@ fn construct_fn<'a, 'gcx, 'tcx, A>(hir: Cx<'a, 'gcx, 'tcx>, let var_hir_id = tcx.hir.node_to_hir_id(var_id); let closure_expr_id = tcx.hir.local_def_id(fn_id); let capture = hir.tables().upvar_capture(ty::UpvarId { - var_id: var_hir_id, + var_path: ty::UpvarPath {hir_id: var_hir_id}, closure_expr_id: LocalDefId::from_def_id(closure_expr_id), }); let by_ref = match capture { @@ -764,7 +816,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { fn_span: span, arg_count, scopes: vec![], - block_context: vec![], + block_context: BlockContext::new(), source_scopes: IndexVec::new(), source_scope: OUTERMOST_SOURCE_SCOPE, source_scope_local_data: IndexVec::new(), @@ -777,7 +829,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { 1, ), upvar_decls, - var_indices: NodeMap(), + var_indices: Default::default(), unit_temp: None, cached_resume_block: None, cached_return_block: None, diff --git a/src/librustc_mir/build/scope.rs b/src/librustc_mir/build/scope.rs index b3e6278825..2a11f24095 100644 --- a/src/librustc_mir/build/scope.rs +++ b/src/librustc_mir/build/scope.rs @@ -90,12 +90,13 @@ should go to. use build::{BlockAnd, BlockAndExtension, Builder, CFG}; use hair::LintLevel; use rustc::middle::region; -use rustc::ty::{Ty, TyCtxt}; +use rustc::ty::Ty; use rustc::hir; use rustc::hir::def_id::LOCAL_CRATE; use rustc::mir::*; use syntax_pos::{Span}; use rustc_data_structures::fx::FxHashMap; +use std::collections::hash_map::Entry; #[derive(Debug)] pub struct Scope<'tcx> { @@ -224,7 +225,7 @@ impl<'tcx> Scope<'tcx> { /// Should always be run for all inner scopes when a drop is pushed into some scope enclosing a /// larger extent of code. /// - /// `storage_only` controls whether to invalidate only drop paths run `StorageDead`. + /// `storage_only` controls whether to invalidate only drop paths that run `StorageDead`. /// `this_scope_only` controls whether to invalidate only drop paths that refer to the current /// top-of-scope (as opposed to dependent scopes). 
fn invalidate_cache(&mut self, storage_only: bool, this_scope_only: bool) { @@ -242,8 +243,8 @@ impl<'tcx> Scope<'tcx> { } if !storage_only && !this_scope_only { - for dropdata in &mut self.drops { - if let DropKind::Value { ref mut cached_block } = dropdata.kind { + for drop_data in &mut self.drops { + if let DropKind::Value { ref mut cached_block } = drop_data.kind { cached_block.invalidate(); } } @@ -323,7 +324,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { let parent_hir_id = tcx.hir.definitions().node_to_hir_id( self.source_scope_local_data[source_scope].lint_root - ); + ); let current_hir_id = tcx.hir.definitions().node_to_hir_id(node_id); sets.lint_level_set(parent_hir_id) == @@ -333,7 +334,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { if !same_lint_scopes { self.source_scope = self.new_source_scope(region_scope.1.span, lint_level, - None); + None); } } self.push_scope(region_scope); @@ -381,15 +382,18 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { let scope = self.scopes.pop().unwrap(); assert_eq!(scope.region_scope, region_scope.0); - self.cfg.push_end_region(self.hir.tcx(), block, region_scope.1, scope.region_scope); - let resume_block = self.resume_block(); - unpack!(block = build_scope_drops(&mut self.cfg, - resume_block, - &scope, - &self.scopes, - block, - self.arg_count, - false)); + let unwind_to = self.scopes.last().and_then(|next_scope| { + next_scope.cached_unwind.get(false) + }).unwrap_or_else(|| self.resume_block()); + + unpack!(block = build_scope_drops( + &mut self.cfg, + &scope, + block, + unwind_to, + self.arg_count, + false, + )); block.unit() } @@ -397,8 +401,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { /// Branch out of `block` to `target`, exiting all scopes up to /// and including `region_scope`. This will insert whatever drops are - /// needed, as well as tracking this exit for the SEME region. See - /// module comment for details. + /// needed. See module comment for details. pub fn exit_scope(&mut self, span: Span, region_scope: (region::Scope, SourceInfo), @@ -416,44 +419,54 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { // If we are emitting a `drop` statement, we need to have the cached // diverge cleanup pads ready in case that drop panics. 
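For context on what these drop chains model at the source level, a brief sketch (not part of the patch): locals with destructors are dropped in reverse declaration order when their scope is popped, and the same drops must also run along the unwind path that the cached cleanup blocks represent.

```rust
// Drop order at scope exit: `_b` before `_a`. If code between the declarations
// and the end of the scope panics, the already-initialized guards are still
// dropped while unwinding, which is what the cached cleanup blocks model.
struct Guard(&'static str);

impl Drop for Guard {
    fn drop(&mut self) {
        println!("dropping {}", self.0);
    }
}

fn main() {
    let _a = Guard("a");
    let _b = Guard("b");
    println!("leaving main");
    // prints: leaving main, dropping b, dropping a
}
```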
- let may_panic = self.scopes[(len - scope_count)..].iter() - .any(|s| s.drops.iter().any(|s| s.kind.may_panic())); + let may_panic = self.scopes[(len - scope_count)..].iter().any(|s| s.needs_cleanup); if may_panic { self.diverge_cleanup(); } - { - let resume_block = self.resume_block(); - let mut rest = &mut self.scopes[(len - scope_count)..]; - while let Some((scope, rest_)) = {rest}.split_last_mut() { - rest = rest_; - block = if let Some(&e) = scope.cached_exits.get(&(target, region_scope.0)) { - self.cfg.terminate(block, scope.source_info(span), - TerminatorKind::Goto { target: e }); - return; - } else { - let b = self.cfg.start_new_block(); - self.cfg.terminate(block, scope.source_info(span), - TerminatorKind::Goto { target: b }); - scope.cached_exits.insert((target, region_scope.0), b); - b + let mut scopes = self.scopes[(len - scope_count - 1)..].iter_mut().rev(); + let mut scope = scopes.next().unwrap(); + for next_scope in scopes { + if scope.drops.is_empty() { + scope = next_scope; + continue; + } + let source_info = scope.source_info(span); + block = match scope.cached_exits.entry((target, region_scope.0)) { + Entry::Occupied(e) => { + self.cfg.terminate(block, source_info, + TerminatorKind::Goto { target: *e.get() }); + return; + } + Entry::Vacant(v) => { + let b = self.cfg.start_new_block(); + self.cfg.terminate(block, source_info, + TerminatorKind::Goto { target: b }); + v.insert(b); + b + } }; - // End all regions for scopes out of which we are breaking. - self.cfg.push_end_region(self.hir.tcx(), block, region_scope.1, scope.region_scope); + let unwind_to = next_scope.cached_unwind.get(false).unwrap_or_else(|| { + debug_assert!(!may_panic, "cached block not present?"); + START_BLOCK + }); - unpack!(block = build_scope_drops(&mut self.cfg, - resume_block, - scope, - rest, - block, - self.arg_count, - false)); - } + unpack!(block = build_scope_drops( + &mut self.cfg, + scope, + block, + unwind_to, + self.arg_count, + false, + )); + + scope = next_scope; } + let scope = &self.scopes[len - scope_count]; self.cfg.terminate(block, scope.source_info(span), - TerminatorKind::Goto { target: target }); + TerminatorKind::Goto { target }); } /// Creates a path that performs all required cleanup for dropping a generator. @@ -465,20 +478,20 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { return None; } - // Fill in the cache + // Fill in the cache for unwinds self.diverge_cleanup_gen(true); let src_info = self.scopes[0].source_info(self.fn_span); + let resume_block = self.resume_block(); + let mut scopes = self.scopes.iter_mut().rev().peekable(); let mut block = self.cfg.start_new_block(); let result = block; - let resume_block = self.resume_block(); - let mut rest = &mut self.scopes[..]; - while let Some((scope, rest_)) = {rest}.split_last_mut() { - rest = rest_; + while let Some(scope) = scopes.next() { if !scope.needs_cleanup { continue; } + block = if let Some(b) = scope.cached_generator_drop { self.cfg.terminate(block, src_info, TerminatorKind::Goto { target: b }); @@ -491,16 +504,20 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { b }; - // End all regions for scopes out of which we are breaking. 
- self.cfg.push_end_region(self.hir.tcx(), block, src_info, scope.region_scope); + let unwind_to = scopes.peek().as_ref().map(|scope| { + scope.cached_unwind.get(true).unwrap_or_else(|| { + span_bug!(src_info.span, "cached block not present?") + }) + }).unwrap_or(resume_block); - unpack!(block = build_scope_drops(&mut self.cfg, - resume_block, - scope, - rest, - block, - self.arg_count, - true)); + unpack!(block = build_scope_drops( + &mut self.cfg, + scope, + block, + unwind_to, + self.arg_count, + true, + )); } self.cfg.terminate(block, src_info, TerminatorKind::GeneratorDrop); @@ -510,9 +527,9 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { /// Creates a new source scope, nested in the current one. pub fn new_source_scope(&mut self, - span: Span, - lint_level: LintLevel, - safety: Option) -> SourceScope { + span: Span, + lint_level: LintLevel, + safety: Option) -> SourceScope { let parent = self.source_scope; debug!("new_source_scope({:?}, {:?}, {:?}) - parent({:?})={:?}", span, lint_level, safety, @@ -749,8 +766,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { /// Creates a path that performs all required cleanup for unwinding. /// /// This path terminates in Resume. Returns the start of the path. - /// See module comment for more details. None indicates there’s no - /// cleanup to do at this point. + /// See module comment for more details. pub fn diverge_cleanup(&mut self) -> BasicBlock { self.diverge_cleanup_gen(false) } @@ -772,11 +788,6 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { } fn diverge_cleanup_gen(&mut self, generator_drop: bool) -> BasicBlock { - // To start, create the resume terminator. - let mut target = self.resume_block(); - - let Builder { ref mut cfg, ref mut scopes, .. } = *self; - // Build up the drops in **reverse** order. The end result will // look like: // @@ -788,11 +799,17 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { // store caches. If everything is cached, we'll just walk right // to left reading the cached results but never created anything. - if scopes.iter().any(|scope| scope.needs_cleanup) { - for scope in scopes.iter_mut() { - target = build_diverge_scope(self.hir.tcx(), cfg, scope.region_scope_span, - scope, target, generator_drop); - } + // Find the last cached block + let (mut target, first_uncached) = if let Some(cached_index) = self.scopes.iter() + .rposition(|scope| scope.cached_unwind.get(generator_drop).is_some()) { + (self.scopes[cached_index].cached_unwind.get(generator_drop).unwrap(), cached_index + 1) + } else { + (self.resume_block(), 0) + }; + + for scope in self.scopes[first_uncached..].iter_mut() { + target = build_diverge_scope(&mut self.cfg, scope.region_scope_span, + scope, target, generator_drop); } target @@ -866,64 +883,62 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { } /// Builds drops for pop_scope and exit_scope. -fn build_scope_drops<'tcx>(cfg: &mut CFG<'tcx>, - resume_block: BasicBlock, - scope: &Scope<'tcx>, - earlier_scopes: &[Scope<'tcx>], - mut block: BasicBlock, - arg_count: usize, - generator_drop: bool) - -> BlockAnd<()> { - debug!("build_scope_drops({:?} -> {:?})", block, scope); - let mut iter = scope.drops.iter().rev(); - while let Some(drop_data) = iter.next() { +fn build_scope_drops<'tcx>( + cfg: &mut CFG<'tcx>, + scope: &Scope<'tcx>, + mut block: BasicBlock, + last_unwind_to: BasicBlock, + arg_count: usize, + generator_drop: bool, +) -> BlockAnd<()> { + debug!("build_scope_drops({:?} -> {:?}", block, scope); + + // Build up the drops in evaluation order. 
The end result will + // look like: + // + // [SDs, drops[n]] --..> [SDs, drop[1]] -> [SDs, drop[0]] -> [[SDs]] + // | | | + // : | | + // V V + // [drop[n]] -...-> [drop[1]] ------> [drop[0]] ------> [last_unwind_to] + // + // The horizontal arrows represent the execution path when the drops return + // successfully. The downwards arrows represent the execution path when the + // drops panic (panicking while unwinding will abort, so there's no need for + // another set of arrows). The drops for the unwind path should have already + // been generated by `diverge_cleanup_gen`. + // + // The code in this function reads from right to left. + // Storage dead drops have to be done left to right (since we can only push + // to the end of a Vec). So, we find the next drop and then call + // push_storage_deads which will iterate backwards through them so that + // they are added in the correct order. + + let mut unwind_blocks = scope.drops.iter().rev().filter_map(|drop_data| { + if let DropKind::Value { cached_block } = drop_data.kind { + Some(cached_block.get(generator_drop).unwrap_or_else(|| { + span_bug!(drop_data.span, "cached block not present?") + })) + } else { + None + } + }); + + // When we unwind from a drop, we start cleaning up from the next one, so + // we don't need this block. + unwind_blocks.next(); + + for drop_data in scope.drops.iter().rev() { let source_info = scope.source_info(drop_data.span); match drop_data.kind { DropKind::Value { .. } => { - // Try to find the next block with its cached block for us to - // diverge into, either a previous block in this current scope or - // the top of the previous scope. - // - // If it wasn't for EndRegion, we could just chain all the DropData - // together and pick the first DropKind::Value. Please do that - // when we replace EndRegion with NLL. - let on_diverge = iter.clone().filter_map(|dd| { - match dd.kind { - DropKind::Value { cached_block } => Some(cached_block), - DropKind::Storage => None - } - }).next().or_else(|| { - if earlier_scopes.iter().any(|scope| scope.needs_cleanup) { - // If *any* scope requires cleanup code to be run, - // we must use the cached unwind from the *topmost* - // scope, to ensure all EndRegions from surrounding - // scopes are executed before the drop code runs. - Some(earlier_scopes.last().unwrap().cached_unwind) - } else { - // We don't need any further cleanup, so return None - // to avoid creating a landing pad. We can skip - // EndRegions because all local regions end anyway - // when the function unwinds. - // - // This is an important optimization because LLVM is - // terrible at optimizing landing pads. FIXME: I think - // it would be cleaner and better to do this optimization - // in SimplifyCfg instead of here. 
- None - } - }); - - let on_diverge = on_diverge.map(|cached_block| { - cached_block.get(generator_drop).unwrap_or_else(|| { - span_bug!(drop_data.span, "cached block not present?") - }) - }); + let unwind_to = unwind_blocks.next().unwrap_or(last_unwind_to); let next = cfg.start_new_block(); cfg.terminate(block, source_info, TerminatorKind::Drop { location: drop_data.location.clone(), target: next, - unwind: Some(on_diverge.unwrap_or(resume_block)) + unwind: Some(unwind_to) }); block = next; } @@ -950,21 +965,17 @@ fn build_scope_drops<'tcx>(cfg: &mut CFG<'tcx>, block.unit() } -fn build_diverge_scope<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, - cfg: &mut CFG<'tcx>, - span: Span, - scope: &mut Scope<'tcx>, - mut target: BasicBlock, - generator_drop: bool) - -> BasicBlock +fn build_diverge_scope<'tcx>(cfg: &mut CFG<'tcx>, + span: Span, + scope: &mut Scope<'tcx>, + mut target: BasicBlock, + generator_drop: bool) + -> BasicBlock { // Build up the drops in **reverse** order. The end result will // look like: // - // [EndRegion Block] -> [drops[n]] -...-> [drops[0]] -> [Free] -> [target] - // | | - // +---------------------------------------------------------+ - // code for scope + // [drops[n]] -...-> [drops[0]] -> [target] // // The code in this function reads from right to left. At each // point, we check for cached blocks representing the @@ -1009,21 +1020,7 @@ fn build_diverge_scope<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, }; } - // Finally, push the EndRegion block, used by mir-borrowck, and set - // `cached_unwind` to point to it (Block becomes trivial goto after - // pass that removes all EndRegions). - target = { - let cached_block = scope.cached_unwind.ref_mut(generator_drop); - if let Some(cached_block) = *cached_block { - cached_block - } else { - let block = cfg.start_new_cleanup_block(); - cfg.push_end_region(tcx, block, source_info(span), scope.region_scope); - cfg.terminate(block, source_info(span), TerminatorKind::Goto { target: target }); - *cached_block = Some(block); - block - } - }; + *scope.cached_unwind.ref_mut(generator_drop) = Some(target); debug!("build_diverge_scope({:?}, {:?}) = {:?}", scope, span, target); diff --git a/src/librustc_mir/const_eval.rs b/src/librustc_mir/const_eval.rs index 9702e94a9e..291b5c170e 100644 --- a/src/librustc_mir/const_eval.rs +++ b/src/librustc_mir/const_eval.rs @@ -17,22 +17,25 @@ use std::hash::Hash; use std::collections::hash_map::Entry; use rustc::hir::{self, def_id::DefId}; -use rustc::mir::interpret::ConstEvalErr; +use rustc::hir::def::Def; +use rustc::mir::interpret::{ConstEvalErr, ErrorHandled}; use rustc::mir; -use rustc::ty::{self, Ty, TyCtxt, Instance, query::TyCtxtAt}; -use rustc::ty::layout::{self, Size, LayoutOf, TyLayout}; +use rustc::ty::{self, TyCtxt, Instance, query::TyCtxtAt}; +use rustc::ty::layout::{self, LayoutOf, TyLayout, VariantIdx}; use rustc::ty::subst::Subst; +use rustc::traits::Reveal; use rustc_data_structures::indexed_vec::IndexVec; use rustc_data_structures::fx::FxHashMap; +use rustc::util::common::ErrorReported; use syntax::ast::Mutability; use syntax::source_map::{Span, DUMMY_SP}; -use interpret::{self, - PlaceTy, MemPlace, OpTy, Operand, Value, Pointer, Scalar, ConstValue, +use crate::interpret::{self, + PlaceTy, MPlaceTy, MemPlace, OpTy, Operand, Immediate, Scalar, RawConst, ConstValue, Pointer, EvalResult, EvalError, EvalErrorKind, GlobalId, EvalContext, StackPopCleanup, Allocation, AllocId, MemoryKind, - snapshot, + snapshot, RefTracking, }; /// Number of steps until the detector even starts 
doing anything. @@ -62,6 +65,7 @@ pub fn mk_borrowck_eval_cx<'a, 'mir, 'tcx>( return_place: None, return_to_block: StackPopCleanup::Goto(None), // never pop stmt: 0, + extra: (), }); Ok(ecx) } @@ -91,11 +95,12 @@ pub(crate) fn eval_promoted<'a, 'mir, 'tcx>( cid: GlobalId<'tcx>, mir: &'mir mir::Mir<'tcx>, param_env: ty::ParamEnv<'tcx>, -) -> EvalResult<'tcx, OpTy<'tcx>> { +) -> EvalResult<'tcx, MPlaceTy<'tcx>> { let mut ecx = mk_borrowck_eval_cx(tcx, cid.instance, mir, DUMMY_SP).unwrap(); eval_body_using_ecx(&mut ecx, cid, Some(mir), param_env) } +// FIXME: These two conversion functions are bad hacks. We should just always use allocations. pub fn op_to_const<'tcx>( ecx: &CompileTimeEvalContext<'_, '_, 'tcx>, op: OpTy<'tcx>, @@ -112,7 +117,7 @@ pub fn op_to_const<'tcx>( _ => false, }; let normalized_op = if normalize { - ecx.try_read_value(op)? + ecx.try_read_immediate(op)? } else { match op.op { Operand::Indirect(mplace) => Err(mplace), @@ -125,7 +130,7 @@ pub fn op_to_const<'tcx>( assert!(meta.is_none()); let ptr = ptr.to_ptr()?; let alloc = ecx.memory.get(ptr.alloc_id)?; - assert!(alloc.align.abi() >= align.abi()); + assert!(alloc.align >= align); assert!(alloc.bytes.len() as u64 - ptr.offset.bytes() >= op.layout.size.bytes()); let mut alloc = alloc.clone(); alloc.align = align; @@ -134,20 +139,27 @@ pub fn op_to_const<'tcx>( let alloc = ecx.tcx.intern_const_alloc(alloc); ConstValue::ByRef(ptr.alloc_id, alloc, ptr.offset) }, - Ok(Value::Scalar(x)) => + Ok(Immediate::Scalar(x)) => ConstValue::Scalar(x.not_undef()?), - Ok(Value::ScalarPair(a, b)) => + Ok(Immediate::ScalarPair(a, b)) => ConstValue::ScalarPair(a.not_undef()?, b.not_undef()?), }; Ok(ty::Const::from_const_value(ecx.tcx.tcx, val, op.layout.ty)) } +pub fn const_to_op<'tcx>( + ecx: &CompileTimeEvalContext<'_, '_, 'tcx>, + cnst: &ty::Const<'tcx>, +) -> EvalResult<'tcx, OpTy<'tcx>> { + let op = ecx.const_value_to_op(cnst.val)?; + Ok(OpTy { op, layout: ecx.layout_of(cnst.ty)? 
}) +} fn eval_body_and_ecx<'a, 'mir, 'tcx>( tcx: TyCtxt<'a, 'tcx, 'tcx>, cid: GlobalId<'tcx>, mir: Option<&'mir mir::Mir<'tcx>>, param_env: ty::ParamEnv<'tcx>, -) -> (EvalResult<'tcx, OpTy<'tcx>>, CompileTimeEvalContext<'a, 'mir, 'tcx>) { +) -> (EvalResult<'tcx, MPlaceTy<'tcx>>, CompileTimeEvalContext<'a, 'mir, 'tcx>) { // we start out with the best span we have // and try improving it down the road when more information is available let span = tcx.def_span(cid.instance.def_id()); @@ -163,7 +175,7 @@ fn eval_body_using_ecx<'mir, 'tcx>( cid: GlobalId<'tcx>, mir: Option<&'mir mir::Mir<'tcx>>, param_env: ty::ParamEnv<'tcx>, -) -> EvalResult<'tcx, OpTy<'tcx>> { +) -> EvalResult<'tcx, MPlaceTy<'tcx>> { debug!("eval_body_using_ecx: {:?}, {:?}", cid, param_env); let tcx = ecx.tcx.tcx; let mut mir = match mir { @@ -203,7 +215,7 @@ fn eval_body_using_ecx<'mir, 'tcx>( ecx.memory.intern_static(ret.ptr.to_ptr()?.alloc_id, mutability)?; debug!("eval_body_using_ecx done: {:?}", *ret); - Ok(ret.into()) + Ok(ret) } impl<'tcx> Into> for ConstEvalError { @@ -342,13 +354,15 @@ impl<'a, 'mir, 'tcx> interpret::Machine<'a, 'mir, 'tcx> for CompileTimeInterpreter<'a, 'mir, 'tcx> { type MemoryKinds = !; - type AllocExtra = (); type PointerTag = (); + type FrameExtra = (); + type MemoryExtra = (); + type AllocExtra = (); + type MemoryMap = FxHashMap, Allocation)>; const STATIC_KIND: Option = None; // no copying of statics allowed - const ENABLE_PTR_TRACKING_HOOKS: bool = false; // we don't have no provenance #[inline(always)] fn enforce_validity(_ecx: &EvalContext<'a, 'mir, 'tcx, Self>) -> bool { @@ -363,13 +377,19 @@ impl<'a, 'mir, 'tcx> interpret::Machine<'a, 'mir, 'tcx> ret: Option, ) -> EvalResult<'tcx, Option<&'mir mir::Mir<'tcx>>> { debug!("eval_fn_call: {:?}", instance); - if !ecx.tcx.is_const_fn(instance.def_id()) { + // Execution might have wandered off into other crates, so we cannot to a stability- + // sensitive check here. But we can at least rule out functions that are not const + // at all. + if !ecx.tcx.is_const_fn_raw(instance.def_id()) { // Some functions we support even if they are non-const -- but avoid testing - // that for const fn! - if ecx.hook_fn(instance, args, dest)? { + // that for const fn! We certainly do *not* want to actually call the fn + // though, so be sure we return here. + return if ecx.hook_fn(instance, args, dest)? { ecx.goto_block(ret)?; // fully evaluated and done - return Ok(None); - } + Ok(None) + } else { + err!(MachineError(format!("calling non-const function `{}`", instance))) + }; } // This is a const fn. Call it. 
Ok(Some(match ecx.load_mir(instance.def) { @@ -416,16 +436,18 @@ impl<'a, 'mir, 'tcx> interpret::Machine<'a, 'mir, 'tcx> } fn find_foreign_static( - _tcx: TyCtxtAt<'a, 'tcx, 'tcx>, _def_id: DefId, + _tcx: TyCtxtAt<'a, 'tcx, 'tcx>, + _memory_extra: &(), ) -> EvalResult<'tcx, Cow<'tcx, Allocation>> { err!(ReadForeignStatic) } #[inline(always)] - fn static_with_default_tag( - alloc: &'_ Allocation - ) -> Cow<'_, Allocation> { + fn adjust_static_allocation<'b>( + alloc: &'b Allocation, + _memory_extra: &(), + ) -> Cow<'b, Allocation> { // We do not use a tag so we can just cheaply forward the reference Cow::Borrowed(alloc) } @@ -464,22 +486,27 @@ impl<'a, 'mir, 'tcx> interpret::Machine<'a, 'mir, 'tcx> } #[inline(always)] - fn tag_reference( + fn tag_new_allocation( _ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>, - _ptr: Pointer, - _pointee_ty: Ty<'tcx>, - _pointee_size: Size, - _borrow_kind: Option, - ) -> EvalResult<'tcx, Self::PointerTag> { - Ok(()) + ptr: Pointer, + _kind: MemoryKind, + ) -> EvalResult<'tcx, Pointer> { + Ok(ptr) } #[inline(always)] - fn tag_dereference( - _ecx: &EvalContext<'a, 'mir, 'tcx, Self>, - _ptr: Pointer, - _ptr_ty: Ty<'tcx>, - ) -> EvalResult<'tcx, Self::PointerTag> { + fn stack_push( + _ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>, + ) -> EvalResult<'tcx> { + Ok(()) + } + + /// Called immediately before a stack frame gets popped + #[inline(always)] + fn stack_pop( + _ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>, + _extra: (), + ) -> EvalResult<'tcx> { Ok(()) } } @@ -489,7 +516,7 @@ pub fn const_field<'a, 'tcx>( tcx: TyCtxt<'a, 'tcx, 'tcx>, param_env: ty::ParamEnv<'tcx>, instance: ty::Instance<'tcx>, - variant: Option, + variant: Option, field: mir::Field, value: &'tcx ty::Const<'tcx>, ) -> ::rustc::mir::interpret::ConstEvalResult<'tcx> { @@ -497,7 +524,7 @@ pub fn const_field<'a, 'tcx>( let ecx = mk_eval_cx(tcx, instance, param_env).unwrap(); let result = (|| { // get the operand again - let op = ecx.const_to_op(value)?; + let op = const_to_op(&ecx, value)?; // downcast let down = match variant { None => op, @@ -509,13 +536,10 @@ pub fn const_field<'a, 'tcx>( // this is not called for statics. op_to_const(&ecx, field, true) })(); - result.map_err(|err| { - let (trace, span) = ecx.generate_stacktrace(None); - ConstEvalErr { - error: err, - stacktrace: trace, - span, - }.into() + result.map_err(|error| { + let err = error_to_const_error(&ecx, error); + err.report_as_error(ecx.tcx, "could not access field of constant"); + ErrorHandled::Reported }) } @@ -524,48 +548,128 @@ pub fn const_variant_index<'a, 'tcx>( param_env: ty::ParamEnv<'tcx>, instance: ty::Instance<'tcx>, val: &'tcx ty::Const<'tcx>, -) -> EvalResult<'tcx, usize> { +) -> EvalResult<'tcx, VariantIdx> { trace!("const_variant_index: {:?}, {:?}", instance, val); let ecx = mk_eval_cx(tcx, instance, param_env).unwrap(); - let op = ecx.const_to_op(val)?; + let op = const_to_op(&ecx, val)?; Ok(ecx.read_discriminant(op)?.1) } -pub fn const_to_allocation_provider<'a, 'tcx>( - _tcx: TyCtxt<'a, 'tcx, 'tcx>, - val: &'tcx ty::Const<'tcx>, -) -> &'tcx Allocation { - // FIXME: This really does not need to be a query. Instead, we should have a query for statics - // that returns an allocation directly (or an `AllocId`?), after doing a sanity check of the - // value and centralizing error reporting. 
- match val.val { - ConstValue::ByRef(_, alloc, offset) => { - assert_eq!(offset.bytes(), 0); - return alloc; - }, - _ => bug!("const_to_allocation called on non-static"), - } +pub fn error_to_const_error<'a, 'mir, 'tcx>( + ecx: &EvalContext<'a, 'mir, 'tcx, CompileTimeInterpreter<'a, 'mir, 'tcx>>, + mut error: EvalError<'tcx> +) -> ConstEvalErr<'tcx> { + error.print_backtrace(); + let stacktrace = ecx.generate_stacktrace(None); + ConstEvalErr { error: error.kind, stacktrace, span: ecx.tcx.span } +} + +fn validate_and_turn_into_const<'a, 'tcx>( + tcx: ty::TyCtxt<'a, 'tcx, 'tcx>, + constant: RawConst<'tcx>, + key: ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>, +) -> ::rustc::mir::interpret::ConstEvalResult<'tcx> { + let cid = key.value; + let ecx = mk_eval_cx(tcx, cid.instance, key.param_env).unwrap(); + let val = (|| { + let op = ecx.raw_const_to_mplace(constant)?.into(); + // FIXME: Once the visitor infrastructure landed, change validation to + // work directly on `MPlaceTy`. + let mut ref_tracking = RefTracking::new(op); + while let Some((op, path)) = ref_tracking.todo.pop() { + ecx.validate_operand( + op, + path, + Some(&mut ref_tracking), + /* const_mode */ true, + )?; + } + // Now that we validated, turn this into a proper constant + let def_id = cid.instance.def.def_id(); + let normalize = tcx.is_static(def_id).is_none() && cid.promoted.is_none(); + op_to_const(&ecx, op, normalize) + })(); + + val.map_err(|error| { + let err = error_to_const_error(&ecx, error); + match err.struct_error(ecx.tcx, "it is undefined behavior to use this value") { + Ok(mut diag) => { + diag.note("The rules on what exactly is undefined behavior aren't clear, \ + so this check might be overzealous. Please open an issue on the rust compiler \ + repository if you believe it should not be considered undefined behavior", + ); + diag.emit(); + ErrorHandled::Reported + } + Err(err) => err, + } + }) } pub fn const_eval_provider<'a, 'tcx>( tcx: TyCtxt<'a, 'tcx, 'tcx>, key: ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>, ) -> ::rustc::mir::interpret::ConstEvalResult<'tcx> { + // see comment in const_eval_provider for what we're doing here + if key.param_env.reveal == Reveal::All { + let mut key = key.clone(); + key.param_env.reveal = Reveal::UserFacing; + match tcx.const_eval(key) { + // try again with reveal all as requested + Err(ErrorHandled::TooGeneric) => { + // Promoteds should never be "too generic" when getting evaluated. + // They either don't get evaluated, or we are in a monomorphic context + assert!(key.value.promoted.is_none()); + }, + // dedupliate calls + other => return other, + } + } + tcx.const_eval_raw(key).and_then(|val| { + validate_and_turn_into_const(tcx, val, key) + }) +} + +pub fn const_eval_raw_provider<'a, 'tcx>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + key: ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>, +) -> ::rustc::mir::interpret::ConstEvalRawResult<'tcx> { + // Because the constant is computed twice (once per value of `Reveal`), we are at risk of + // reporting the same error twice here. To resolve this, we check whether we can evaluate the + // constant in the more restrictive `Reveal::UserFacing`, which most likely already was + // computed. For a large percentage of constants that will already have succeeded. Only + // associated constants of generic functions will fail due to not enough monomorphization + // information being available. + + // In case we fail in the `UserFacing` variant, we just do the real computation. 
+ if key.param_env.reveal == Reveal::All { + let mut key = key.clone(); + key.param_env.reveal = Reveal::UserFacing; + match tcx.const_eval_raw(key) { + // try again with reveal all as requested + Err(ErrorHandled::TooGeneric) => {}, + // dedupliate calls + other => return other, + } + } + // the first trace is for replicating an ice + // There's no tracking issue, but the next two lines concatenated link to the discussion on + // zulip. It's not really possible to test this, because it doesn't show up in diagnostics + // or MIR. + // https://rust-lang.zulipchat.com/#narrow/stream/146212-t-compiler.2Fconst-eval/ + // subject/anon_const_instance_printing/near/135980032 + trace!("const eval: {}", key.value.instance); trace!("const eval: {:?}", key); + let cid = key.value; let def_id = cid.instance.def.def_id(); if let Some(id) = tcx.hir.as_local_node_id(def_id) { let tables = tcx.typeck_tables_of(def_id); - let span = tcx.def_span(def_id); // Do match-check before building MIR - if tcx.check_match(def_id).is_err() { - return Err(ConstEvalErr { - error: EvalErrorKind::CheckMatchError.into(), - stacktrace: vec![], - span, - }.into()); + if let Err(ErrorReported) = tcx.check_match(def_id) { + return Err(ErrorHandled::Reported) } if let hir::BodyOwnerKind::Const = tcx.hir.body_owner_kind(id) { @@ -574,38 +678,71 @@ pub fn const_eval_provider<'a, 'tcx>( // Do not continue into miri if typeck errors occurred; it will fail horribly if tables.tainted_by_errors { - return Err(ConstEvalErr { - error: EvalErrorKind::CheckMatchError.into(), - stacktrace: vec![], - span, - }.into()); + return Err(ErrorHandled::Reported) } }; let (res, ecx) = eval_body_and_ecx(tcx, cid, None, key.param_env); - res.and_then(|op| { - let normalize = tcx.is_static(def_id).is_none() && cid.promoted.is_none(); - if !normalize { - // Sanity check: These must always be a MemPlace - match op.op { - Operand::Indirect(_) => { /* all is good */ }, - Operand::Immediate(_) => bug!("const eval gave us an Immediate"), - } - } - op_to_const(&ecx, op, normalize) - }).map_err(|err| { - let (trace, span) = ecx.generate_stacktrace(None); - let err = ConstEvalErr { - error: err, - stacktrace: trace, - span, - }; + res.and_then(|place| { + Ok(RawConst { + alloc_id: place.to_ptr().expect("we allocated this ptr!").alloc_id, + ty: place.layout.ty + }) + }).map_err(|error| { + let err = error_to_const_error(&ecx, error); + // errors in statics are always emitted as fatal errors if tcx.is_static(def_id).is_some() { - err.report_as_error(ecx.tcx, "could not evaluate static initializer"); + let err = err.report_as_error(ecx.tcx, "could not evaluate static initializer"); + // check that a static never produces `TooGeneric` if tcx.sess.err_count() == 0 { - span_bug!(span, "static eval failure didn't emit an error: {:#?}", err); + span_bug!(ecx.tcx.span, "static eval failure didn't emit an error: {:#?}", err); } + err + } else if def_id.is_local() { + // constant defined in this crate, we can figure out a lint level! + match tcx.describe_def(def_id) { + // constants never produce a hard error at the definition site. 
Anything else is + // a backwards compatibility hazard (and will break old versions of winapi for sure) + // + // note that validation may still cause a hard error on this very same constant, + // because any code that existed before validation could not have failed validation + // thus preventing such a hard error from being a backwards compatibility hazard + Some(Def::Const(_)) | Some(Def::AssociatedConst(_)) => { + let node_id = tcx.hir.as_local_node_id(def_id).unwrap(); + err.report_as_lint( + tcx.at(tcx.def_span(def_id)), + "any use of this value will cause an error", + node_id, + ) + }, + // promoting runtime code is only allowed to error if it references broken constants + // any other kind of error will be reported to the user as a deny-by-default lint + _ => if let Some(p) = cid.promoted { + let span = tcx.optimized_mir(def_id).promoted[p].span; + if let EvalErrorKind::ReferencedConstant = err.error { + err.report_as_error( + tcx.at(span), + "evaluation of constant expression failed", + ) + } else { + err.report_as_lint( + tcx.at(span), + "reaching this expression at runtime will panic or abort", + tcx.hir.as_local_node_id(def_id).unwrap(), + ) + } + // anything else (array lengths, enum initializers, constant patterns) are reported + // as hard errors + } else { + err.report_as_error( + ecx.tcx, + "evaluation of constant value failed", + ) + }, + } + } else { + // use of broken constant from other crate + err.report_as_error(ecx.tcx, "could not evaluate constant") } - err.into() }) } diff --git a/src/librustc_mir/dataflow/impls/borrowed_locals.rs b/src/librustc_mir/dataflow/impls/borrowed_locals.rs index 8d186597b1..1e279d8dd9 100644 --- a/src/librustc_mir/dataflow/impls/borrowed_locals.rs +++ b/src/librustc_mir/dataflow/impls/borrowed_locals.rs @@ -28,7 +28,7 @@ pub struct HaveBeenBorrowedLocals<'a, 'tcx: 'a> { impl<'a, 'tcx: 'a> HaveBeenBorrowedLocals<'a, 'tcx> { pub fn new(mir: &'a Mir<'tcx>) -> Self { - HaveBeenBorrowedLocals { mir: mir } + HaveBeenBorrowedLocals { mir } } pub fn mir(&self) -> &Mir<'tcx> { diff --git a/src/librustc_mir/dataflow/impls/borrows.rs b/src/librustc_mir/dataflow/impls/borrows.rs index f7043487c5..27bc28ac81 100644 --- a/src/librustc_mir/dataflow/impls/borrows.rs +++ b/src/librustc_mir/dataflow/impls/borrows.rs @@ -12,18 +12,13 @@ use borrow_check::borrow_set::{BorrowSet, BorrowData}; use borrow_check::place_ext::PlaceExt; use rustc; -use rustc::hir; -use rustc::hir::def_id::DefId; -use rustc::middle::region; use rustc::mir::{self, Location, Place, Mir}; use rustc::ty::TyCtxt; -use rustc::ty::{RegionKind, RegionVid}; -use rustc::ty::RegionKind::ReScope; +use rustc::ty::RegionVid; use rustc_data_structures::bit_set::{BitSet, BitSetOperator}; use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::indexed_vec::{Idx, IndexVec}; -use rustc_data_structures::sync::Lrc; use dataflow::{BitDenotation, BlockSets, InitialFlow}; pub use dataflow::indexes::BorrowIndex; @@ -42,8 +37,6 @@ use std::rc::Rc; pub struct Borrows<'a, 'gcx: 'tcx, 'tcx: 'a> { tcx: TyCtxt<'a, 'gcx, 'tcx>, mir: &'a Mir<'tcx>, - scope_tree: Lrc, - root_scope: Option, borrow_set: Rc>, borrows_out_of_scope_at_location: FxHashMap>, @@ -150,18 +143,8 @@ impl<'a, 'gcx, 'tcx> Borrows<'a, 'gcx, 'tcx> { tcx: TyCtxt<'a, 'gcx, 'tcx>, mir: &'a Mir<'tcx>, nonlexical_regioncx: Rc>, - def_id: DefId, - body_id: Option, borrow_set: &Rc>, ) -> Self { - let scope_tree = tcx.region_scope_tree(def_id); - let root_scope = body_id.map(|body_id| { - region::Scope { - id: 
tcx.hir.body(body_id).value.hir_id.local_id, - data: region::ScopeData::CallSite - } - }); - let mut borrows_out_of_scope_at_location = FxHashMap::default(); for (borrow_index, borrow_data) in borrow_set.borrows.iter_enumerated() { let borrow_region = borrow_data.region.to_region_vid(); @@ -177,22 +160,18 @@ impl<'a, 'gcx, 'tcx> Borrows<'a, 'gcx, 'tcx> { mir: mir, borrow_set: borrow_set.clone(), borrows_out_of_scope_at_location, - scope_tree, - root_scope, _nonlexical_regioncx: nonlexical_regioncx, } } crate fn borrows(&self) -> &IndexVec> { &self.borrow_set.borrows } - pub fn scope_tree(&self) -> &Lrc { &self.scope_tree } pub fn location(&self, idx: BorrowIndex) -> &Location { &self.borrow_set.borrows[idx].reserve_location } /// Add all borrows to the kill set, if those borrows are out of scope at `location`. - /// That means either they went out of either a nonlexical scope, if we care about those - /// at the moment, or the location represents a lexical EndRegion + /// That means they went out of a nonlexical scope fn kill_loans_out_of_scope_at_location(&self, sets: &mut BlockSets, location: Location) { @@ -253,9 +232,6 @@ impl<'a, 'gcx, 'tcx> BitDenotation for Borrows<'a, 'gcx, 'tcx> { }); match stmt.kind { - mir::StatementKind::EndRegion(_) => { - } - mir::StatementKind::Assign(ref lhs, ref rhs) => { // Make sure there are no remaining borrows for variables // that are assigned over. @@ -282,22 +258,13 @@ impl<'a, 'gcx, 'tcx> BitDenotation for Borrows<'a, 'gcx, 'tcx> { panic!("could not find BorrowIndex for location {:?}", location); }); - if let RegionKind::ReEmpty = region { - // If the borrowed value dies before the borrow is used, the region for - // the borrow can be empty. Don't track the borrow in that case. - debug!("Borrows::statement_effect_on_borrows \ - location: {:?} stmt: {:?} has empty region, killing {:?}", - location, stmt.kind, index); - sets.kill(*index); - return - } else { - debug!("Borrows::statement_effect_on_borrows location: {:?} stmt: {:?}", - location, stmt.kind); - } - - assert!(self.borrow_set.region_map.get(region).unwrap_or_else(|| { - panic!("could not find BorrowIndexs for region {:?}", region); - }).contains(&index)); + assert!(self.borrow_set.region_map + .get(®ion.to_region_vid()) + .unwrap_or_else(|| { + panic!("could not find BorrowIndexs for RegionVid {:?}", region); + }) + .contains(&index) + ); sets.gen(*index); // Issue #46746: Two-phase borrows handles @@ -339,7 +306,8 @@ impl<'a, 'gcx, 'tcx> BitDenotation for Borrows<'a, 'gcx, 'tcx> { mir::StatementKind::FakeRead(..) | mir::StatementKind::SetDiscriminant { .. } | mir::StatementKind::StorageLive(..) | - mir::StatementKind::Validate(..) | + mir::StatementKind::Retag { .. } | + mir::StatementKind::EscapeToRaw { .. } | mir::StatementKind::AscribeUserType(..) | mir::StatementKind::Nop => {} @@ -353,52 +321,7 @@ impl<'a, 'gcx, 'tcx> BitDenotation for Borrows<'a, 'gcx, 'tcx> { self.kill_loans_out_of_scope_at_location(sets, location); } - fn terminator_effect(&self, sets: &mut BlockSets, location: Location) { - debug!("Borrows::terminator_effect sets: {:?} location: {:?}", sets, location); - - let block = &self.mir.basic_blocks().get(location.block).unwrap_or_else(|| { - panic!("could not find block at location {:?}", location); - }); - - let term = block.terminator(); - match term.kind { - mir::TerminatorKind::Resume | - mir::TerminatorKind::Return | - mir::TerminatorKind::GeneratorDrop => { - // When we return from the function, then all `ReScope`-style regions - // are guaranteed to have ended. 
- // Normally, there would be `EndRegion` statements that come before, - // and hence most of these loans will already be dead -- but, in some cases - // like unwind paths, we do not always emit `EndRegion` statements, so we - // add some kills here as a "backup" and to avoid spurious error messages. - for (borrow_index, borrow_data) in self.borrow_set.borrows.iter_enumerated() { - if let ReScope(scope) = borrow_data.region { - // Check that the scope is not actually a scope from a function that is - // a parent of our closure. Note that the CallSite scope itself is - // *outside* of the closure, for some weird reason. - if let Some(root_scope) = self.root_scope { - if *scope != root_scope && - self.scope_tree.is_subscope_of(*scope, root_scope) - { - sets.kill(borrow_index); - } - } - } - } - } - mir::TerminatorKind::Abort | - mir::TerminatorKind::SwitchInt {..} | - mir::TerminatorKind::Drop {..} | - mir::TerminatorKind::DropAndReplace {..} | - mir::TerminatorKind::Call {..} | - mir::TerminatorKind::Assert {..} | - mir::TerminatorKind::Yield {..} | - mir::TerminatorKind::Goto {..} | - mir::TerminatorKind::FalseEdges {..} | - mir::TerminatorKind::FalseUnwind {..} | - mir::TerminatorKind::Unreachable => {} - } - } + fn terminator_effect(&self, _: &mut BlockSets, _: Location) {} fn propagate_call_return(&self, _in_out: &mut BitSet, diff --git a/src/librustc_mir/dataflow/impls/storage_liveness.rs b/src/librustc_mir/dataflow/impls/storage_liveness.rs index ab03ace23d..c8faa34df8 100644 --- a/src/librustc_mir/dataflow/impls/storage_liveness.rs +++ b/src/librustc_mir/dataflow/impls/storage_liveness.rs @@ -21,7 +21,7 @@ pub struct MaybeStorageLive<'a, 'tcx: 'a> { impl<'a, 'tcx: 'a> MaybeStorageLive<'a, 'tcx> { pub fn new(mir: &'a Mir<'tcx>) -> Self { - MaybeStorageLive { mir: mir } + MaybeStorageLive { mir } } pub fn mir(&self) -> &Mir<'tcx> { diff --git a/src/librustc_mir/dataflow/mod.rs b/src/librustc_mir/dataflow/mod.rs index da4bd780eb..c19145636e 100644 --- a/src/librustc_mir/dataflow/mod.rs +++ b/src/librustc_mir/dataflow/mod.rs @@ -724,20 +724,6 @@ impl<'a, 'tcx, D> DataflowAnalysis<'a, 'tcx, D> where D: BitDenotation } } } - - pub fn new_from_sets(mir: &'a Mir<'tcx>, - dead_unwinds: &'a BitSet, - sets: AllSets, - denotation: D) -> Self { - DataflowAnalysis { - mir, - dead_unwinds, - flow_state: DataflowState { - sets: sets, - operator: denotation, - } - } - } } impl<'a, 'tcx: 'a, D> DataflowAnalysis<'a, 'tcx, D> where D: BitDenotation diff --git a/src/librustc_mir/dataflow/move_paths/builder.rs b/src/librustc_mir/dataflow/move_paths/builder.rs index 08696dc098..7fe27e97d3 100644 --- a/src/librustc_mir/dataflow/move_paths/builder.rs +++ b/src/librustc_mir/dataflow/move_paths/builder.rs @@ -12,12 +12,12 @@ use rustc::ty::{self, TyCtxt}; use rustc::mir::*; use rustc::mir::tcx::RvalueInitializationState; use rustc_data_structures::indexed_vec::{IndexVec}; +use smallvec::{SmallVec, smallvec}; use std::collections::hash_map::Entry; use std::mem; use super::abs_domain::Lift; - use super::{LocationMap, MoveData, MovePath, MovePathLookup, MovePathIndex, MoveOut, MoveOutIndex}; use super::{MoveError, InitIndex, Init, InitLocation, LookupResult, InitKind}; use super::IllegalMoveOriginKind::*; @@ -64,8 +64,8 @@ impl<'a, 'gcx, 'tcx> MoveDataBuilder<'a, 'gcx, 'tcx> { } fn new_move_path(move_paths: &mut IndexVec>, - path_map: &mut IndexVec>, - init_path_map: &mut IndexVec>, + path_map: &mut IndexVec>, + init_path_map: &mut IndexVec>, parent: Option, place: Place<'tcx>) -> MovePathIndex @@ -83,10 +83,10 
@@ impl<'a, 'gcx, 'tcx> MoveDataBuilder<'a, 'gcx, 'tcx> { move_paths[move_path].next_sibling = next_sibling; } - let path_map_ent = path_map.push(vec![]); + let path_map_ent = path_map.push(smallvec![]); assert_eq!(path_map_ent, move_path); - let init_path_map_ent = init_path_map.push(vec![]); + let init_path_map_ent = init_path_map.push(smallvec![]); assert_eq!(init_path_map_ent, move_path); move_path @@ -128,7 +128,7 @@ impl<'b, 'a, 'gcx, 'tcx> Gatherer<'b, 'a, 'gcx, 'tcx> { proj: &PlaceProjection<'tcx>) -> Result> { - let base = try!(self.move_path_for(&proj.base)); + let base = self.move_path_for(&proj.base)?; let mir = self.builder.mir; let tcx = self.builder.tcx; let place_ty = proj.base.ty(mir, tcx).to_ty(tcx); @@ -289,7 +289,7 @@ impl<'b, 'a, 'gcx, 'tcx> Gatherer<'b, 'a, 'gcx, 'tcx> { self.gather_init(output, InitKind::Deep); } } - for input in inputs.iter() { + for (_, input) in inputs.iter() { self.gather_operand(input); } } @@ -301,8 +301,8 @@ impl<'b, 'a, 'gcx, 'tcx> Gatherer<'b, 'a, 'gcx, 'tcx> { span_bug!(stmt.source_info.span, "SetDiscriminant should not exist during borrowck"); } - StatementKind::EndRegion(_) | - StatementKind::Validate(..) | + StatementKind::Retag { .. } | + StatementKind::EscapeToRaw { .. } | StatementKind::AscribeUserType(..) | StatementKind::Nop => {} } @@ -430,6 +430,20 @@ impl<'b, 'a, 'gcx, 'tcx> Gatherer<'b, 'a, 'gcx, 'tcx> { fn gather_init(&mut self, place: &Place<'tcx>, kind: InitKind) { debug!("gather_init({:?}, {:?})", self.loc, place); + let place = match place { + // Check if we are assigning into a field of a union, if so, lookup the place + // of the union so it is marked as initialized again. + Place::Projection(box Projection { + base, + elem: ProjectionElem::Field(_, _), + }) if match base.ty(self.builder.mir, self.builder.tcx).to_ty(self.builder.tcx).sty { + ty::TyKind::Adt(def, _) if def.is_union() => true, + _ => false, + } => base, + // Otherwise, lookup the place. + _ => place, + }; + if let LookupResult::Exact(path) = self.builder.data.rev_lookup.find(place) { let init = self.builder.data.inits.push(Init { location: InitLocation::Statement(self.loc), diff --git a/src/librustc_mir/dataflow/move_paths/mod.rs b/src/librustc_mir/dataflow/move_paths/mod.rs index 58a2b93610..2a026b8f52 100644 --- a/src/librustc_mir/dataflow/move_paths/mod.rs +++ b/src/librustc_mir/dataflow/move_paths/mod.rs @@ -13,6 +13,7 @@ use rustc::ty::{self, TyCtxt}; use rustc::mir::*; use rustc::util::nodemap::FxHashMap; use rustc_data_structures::indexed_vec::{IndexVec}; +use smallvec::SmallVec; use syntax_pos::{Span}; use std::fmt; @@ -141,14 +142,14 @@ pub struct MoveData<'tcx> { /// of executing the code at `l`. (There can be multiple MoveOut's /// for a given `l` because each MoveOut is associated with one /// particular path being moved.) - pub loc_map: LocationMap>, - pub path_map: IndexVec>, + pub loc_map: LocationMap>, + pub path_map: IndexVec>, pub rev_lookup: MovePathLookup<'tcx>, pub inits: IndexVec, /// Each Location `l` is mapped to the Inits that are effects /// of executing the code at `l`. 
- pub init_loc_map: LocationMap>, - pub init_path_map: IndexVec>, + pub init_loc_map: LocationMap>, + pub init_path_map: IndexVec>, } pub trait HasMoveData<'tcx> { diff --git a/src/librustc_mir/hair/constant.rs b/src/librustc_mir/hair/constant.rs new file mode 100644 index 0000000000..c98ef31c2b --- /dev/null +++ b/src/librustc_mir/hair/constant.rs @@ -0,0 +1,102 @@ +use syntax::ast; +use rustc::ty::{self, Ty, TyCtxt, ParamEnv}; +use syntax_pos::symbol::Symbol; +use rustc::mir::interpret::{ConstValue, Scalar}; + +#[derive(PartialEq)] +crate enum LitToConstError { + UnparseableFloat, + Reported, +} + +crate fn lit_to_const<'a, 'gcx, 'tcx>( + lit: &'tcx ast::LitKind, + tcx: TyCtxt<'a, 'gcx, 'tcx>, + ty: Ty<'tcx>, + neg: bool, +) -> Result<&'tcx ty::Const<'tcx>, LitToConstError> { + use syntax::ast::*; + + let trunc = |n| { + let param_ty = ParamEnv::reveal_all().and(tcx.lift_to_global(&ty).unwrap()); + let width = tcx.layout_of(param_ty).map_err(|_| LitToConstError::Reported)?.size; + trace!("trunc {} with size {} and shift {}", n, width.bits(), 128 - width.bits()); + let shift = 128 - width.bits(); + let result = (n << shift) >> shift; + trace!("trunc result: {}", result); + Ok(ConstValue::Scalar(Scalar::Bits { + bits: result, + size: width.bytes() as u8, + })) + }; + + use rustc::mir::interpret::*; + let lit = match *lit { + LitKind::Str(ref s, _) => { + let s = s.as_str(); + let id = tcx.allocate_bytes(s.as_bytes()); + ConstValue::new_slice(Scalar::Ptr(id.into()), s.len() as u64, &tcx) + }, + LitKind::ByteStr(ref data) => { + let id = tcx.allocate_bytes(data); + ConstValue::Scalar(Scalar::Ptr(id.into())) + }, + LitKind::Byte(n) => ConstValue::Scalar(Scalar::Bits { + bits: n as u128, + size: 1, + }), + LitKind::Int(n, _) if neg => { + let n = n as i128; + let n = n.overflowing_neg().0; + trunc(n as u128)? + }, + LitKind::Int(n, _) => trunc(n)?, + LitKind::Float(n, fty) => { + parse_float(n, fty, neg).map_err(|_| LitToConstError::UnparseableFloat)? + } + LitKind::FloatUnsuffixed(n) => { + let fty = match ty.sty { + ty::Float(fty) => fty, + _ => bug!() + }; + parse_float(n, fty, neg).map_err(|_| LitToConstError::UnparseableFloat)? 
+ } + LitKind::Bool(b) => ConstValue::Scalar(Scalar::from_bool(b)), + LitKind::Char(c) => ConstValue::Scalar(Scalar::from_char(c)), + }; + Ok(ty::Const::from_const_value(tcx, lit, ty)) +} + +fn parse_float<'tcx>( + num: Symbol, + fty: ast::FloatTy, + neg: bool, +) -> Result, ()> { + let num = num.as_str(); + use rustc_apfloat::ieee::{Single, Double}; + use rustc_apfloat::Float; + let (bits, size) = match fty { + ast::FloatTy::F32 => { + num.parse::().map_err(|_| ())?; + let mut f = num.parse::().unwrap_or_else(|e| { + panic!("apfloat::ieee::Single failed to parse `{}`: {:?}", num, e) + }); + if neg { + f = -f; + } + (f.to_bits(), 4) + } + ast::FloatTy::F64 => { + num.parse::().map_err(|_| ())?; + let mut f = num.parse::().unwrap_or_else(|e| { + panic!("apfloat::ieee::Single failed to parse `{}`: {:?}", num, e) + }); + if neg { + f = -f; + } + (f.to_bits(), 8) + } + }; + + Ok(ConstValue::Scalar(Scalar::Bits { bits, size })) +} diff --git a/src/librustc_mir/hair/cx/block.rs b/src/librustc_mir/hair/cx/block.rs index 586d6d87fa..d56ddcb494 100644 --- a/src/librustc_mir/hair/cx/block.rs +++ b/src/librustc_mir/hair/cx/block.rs @@ -57,6 +57,7 @@ fn mirror_stmts<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, for (index, stmt) in stmts.iter().enumerate() { let hir_id = cx.tcx.hir.node_to_hir_id(stmt.node.id()); let opt_dxn_ext = cx.region_scope_tree.opt_destruction_scope(hir_id.local_id); + let stmt_span = StatementSpan(cx.tcx.hir.span(stmt.node.id())); match stmt.node { hir::StmtKind::Expr(ref expr, _) | hir::StmtKind::Semi(ref expr, _) => { @@ -69,6 +70,7 @@ fn mirror_stmts<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, expr: expr.to_ref(), }, opt_destruction_scope: opt_dxn_ext, + span: stmt_span, }))) } hir::StmtKind::Decl(ref decl, _) => { @@ -111,6 +113,7 @@ fn mirror_stmts<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, lint_level: cx.lint_level_of(local.id), }, opt_destruction_scope: opt_dxn_ext, + span: stmt_span, }))); } } diff --git a/src/librustc_mir/hair/cx/expr.rs b/src/librustc_mir/hair/cx/expr.rs index 1df5f78975..2e9edf20c5 100644 --- a/src/librustc_mir/hair/cx/expr.rs +++ b/src/librustc_mir/hair/cx/expr.rs @@ -15,7 +15,7 @@ use hair::cx::block; use hair::cx::to_ref::ToRef; use hair::util::UserAnnotatedTyHelpers; use rustc::hir::def::{Def, CtorKind}; -use rustc::mir::interpret::GlobalId; +use rustc::mir::interpret::{GlobalId, ErrorHandled}; use rustc::ty::{self, AdtKind, Ty}; use rustc::ty::adjustment::{Adjustment, Adjust, AutoBorrow, AutoBorrowMutability}; use rustc::ty::cast::CastKind as TyCastKind; @@ -284,7 +284,7 @@ fn make_mirror_unadjusted<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, Some((adt_def, adt_def.variant_index_with_id(variant_id))) } Def::StructCtor(_, CtorKind::Fn) | - Def::SelfCtor(..) => Some((adt_def, 0)), + Def::SelfCtor(..) 
=> Some((adt_def, VariantIdx::new(0))), _ => None, } }) @@ -468,7 +468,7 @@ fn make_mirror_unadjusted<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, AdtKind::Struct | AdtKind::Union => { ExprKind::Adt { adt_def: adt, - variant_index: 0, + variant_index: VariantIdx::new(0), substs, user_ty: cx.user_substs_applied_to_adt(expr.hir_id, adt), fields: field_refs(cx, fields), @@ -571,8 +571,9 @@ fn make_mirror_unadjusted<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, let span = cx.tcx.def_span(def_id); let count = match cx.tcx.at(span).const_eval(cx.param_env.and(global_id)) { Ok(cv) => cv.unwrap_usize(cx.tcx), - Err(e) => { - e.report_as_error(cx.tcx.at(span), "could not evaluate array length"); + Err(ErrorHandled::Reported) => 0, + Err(ErrorHandled::TooGeneric) => { + cx.tcx.sess.span_err(span, "array lengths can't depend on generic parameters"); 0 }, }; @@ -1060,7 +1061,7 @@ fn convert_var<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, // ...but the upvar might be an `&T` or `&mut T` capture, at which // point we need an implicit deref let upvar_id = ty::UpvarId { - var_id: var_hir_id, + var_path: ty::UpvarPath {hir_id: var_hir_id}, closure_expr_id: LocalDefId::from_def_id(closure_def_id), }; match cx.tables().upvar_capture(upvar_id) { @@ -1177,7 +1178,7 @@ fn capture_freevar<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, -> ExprRef<'tcx> { let var_hir_id = cx.tcx.hir.node_to_hir_id(freevar.var_id()); let upvar_id = ty::UpvarId { - var_id: var_hir_id, + var_path: ty::UpvarPath { hir_id: var_hir_id }, closure_expr_id: cx.tcx.hir.local_def_id(closure_expr.id).to_local(), }; let upvar_capture = cx.tables().upvar_capture(upvar_id); diff --git a/src/librustc_mir/hair/cx/mod.rs b/src/librustc_mir/hair/cx/mod.rs index 5f79813596..c414088b65 100644 --- a/src/librustc_mir/hair/cx/mod.rs +++ b/src/librustc_mir/hair/cx/mod.rs @@ -25,12 +25,13 @@ use rustc::infer::InferCtxt; use rustc::ty::subst::Subst; use rustc::ty::{self, Ty, TyCtxt}; use rustc::ty::subst::{Kind, Substs}; -use syntax::ast::{self, LitKind}; +use rustc::ty::layout::VariantIdx; +use syntax::ast; use syntax::attr; use syntax::symbol::Symbol; use rustc::hir; use rustc_data_structures::sync::Lrc; -use hair::pattern::parse_float; +use hair::constant::{lit_to_const, LitToConstError}; #[derive(Clone)] pub struct Cx<'a, 'gcx: 'a + 'tcx, 'tcx: 'a> { @@ -130,7 +131,6 @@ impl<'a, 'gcx, 'tcx> Cx<'a, 'gcx, 'tcx> { ty::Const::from_bool(self.tcx, false) } - // FIXME: Combine with rustc_mir::hair::pattern::lit_to_const pub fn const_eval_literal( &mut self, lit: &'tcx ast::LitKind, @@ -140,61 +140,19 @@ impl<'a, 'gcx, 'tcx> Cx<'a, 'gcx, 'tcx> { ) -> &'tcx ty::Const<'tcx> { trace!("const_eval_literal: {:#?}, {:?}, {:?}, {:?}", lit, ty, sp, neg); - let parse_float = |num, fty| -> ConstValue<'tcx> { - parse_float(num, fty, neg).unwrap_or_else(|_| { + match lit_to_const(lit, self.tcx, ty, neg) { + Ok(c) => c, + Err(LitToConstError::UnparseableFloat) => { // FIXME(#31407) this is only necessary because float parsing is buggy - self.tcx.sess.span_fatal(sp, "could not evaluate float literal (see issue #31407)"); - }) - }; - - let trunc = |n| { - let param_ty = self.param_env.and(self.tcx.lift_to_global(&ty).unwrap()); - let width = self.tcx.layout_of(param_ty).unwrap().size; - trace!("trunc {} with size {} and shift {}", n, width.bits(), 128 - width.bits()); - let shift = 128 - width.bits(); - let result = (n << shift) >> shift; - trace!("trunc result: {}", result); - ConstValue::Scalar(Scalar::Bits { - bits: result, - size: width.bytes() as u8, - }) - }; - - use 
rustc::mir::interpret::*; - let lit = match *lit { - LitKind::Str(ref s, _) => { - let s = s.as_str(); - let id = self.tcx.allocate_bytes(s.as_bytes()); - ConstValue::new_slice(Scalar::Ptr(id.into()), s.len() as u64, self.tcx) + self.tcx.sess.span_err(sp, "could not evaluate float literal (see issue #31407)"); + // create a dummy value and continue compiling + Const::from_bits(self.tcx, 0, self.param_env.and(ty)) }, - LitKind::ByteStr(ref data) => { - let id = self.tcx.allocate_bytes(data); - ConstValue::Scalar(Scalar::Ptr(id.into())) - }, - LitKind::Byte(n) => ConstValue::Scalar(Scalar::Bits { - bits: n as u128, - size: 1, - }), - LitKind::Int(n, _) if neg => { - let n = n as i128; - let n = n.overflowing_neg().0; - trunc(n as u128) - }, - LitKind::Int(n, _) => trunc(n), - LitKind::Float(n, fty) => { - parse_float(n, fty) + Err(LitToConstError::Reported) => { + // create a dummy value and continue compiling + Const::from_bits(self.tcx, 0, self.param_env.and(ty)) } - LitKind::FloatUnsuffixed(n) => { - let fty = match ty.sty { - ty::Float(fty) => fty, - _ => bug!() - }; - parse_float(n, fty) - } - LitKind::Bool(b) => ConstValue::Scalar(Scalar::from_bool(b)), - LitKind::Char(c) => ConstValue::Scalar(Scalar::from_char(c)), - }; - ty::Const::from_const_value(self.tcx, lit, ty) + } } pub fn pattern_from_hir(&mut self, p: &hir::Pat) -> Pattern<'tcx> { @@ -228,7 +186,7 @@ impl<'a, 'gcx, 'tcx> Cx<'a, 'gcx, 'tcx> { bug!("found no method `{}` in `{:?}`", method_name, trait_def_id); } - pub fn all_fields(&mut self, adt_def: &ty::AdtDef, variant_index: usize) -> Vec { + pub fn all_fields(&mut self, adt_def: &ty::AdtDef, variant_index: VariantIdx) -> Vec { (0..adt_def.variants[variant_index].fields.len()) .map(Field::new) .collect() diff --git a/src/librustc_mir/hair/mod.rs b/src/librustc_mir/hair/mod.rs index 8a24851de8..e604b118ea 100644 --- a/src/librustc_mir/hair/mod.rs +++ b/src/librustc_mir/hair/mod.rs @@ -19,12 +19,14 @@ use rustc::hir::def_id::DefId; use rustc::middle::region; use rustc::ty::subst::Substs; use rustc::ty::{AdtDef, UpvarSubsts, Region, Ty, Const}; +use rustc::ty::layout::VariantIdx; use rustc::hir; use syntax::ast; use syntax_pos::Span; use self::cx::Cx; pub mod cx; +mod constant; pub mod pattern; pub use self::pattern::{BindingMode, Pattern, PatternKind, FieldPattern}; @@ -71,10 +73,14 @@ pub enum StmtRef<'tcx> { Mirror(Box>), } +#[derive(Clone, Debug)] +pub struct StatementSpan(pub Span); + #[derive(Clone, Debug)] pub struct Stmt<'tcx> { pub kind: StmtKind<'tcx>, pub opt_destruction_scope: Option, + pub span: StatementSpan, } #[derive(Clone, Debug)] @@ -115,7 +121,7 @@ pub enum StmtKind<'tcx> { /// reference to an expression in this enum is an `ExprRef<'tcx>`, which /// may in turn be another instance of this enum (boxed), or else an /// unlowered `&'tcx H::Expr`. Note that instances of `Expr` are very -/// shortlived. They are created by `Hair::to_expr`, analyzed and +/// short-lived. They are created by `Hair::to_expr`, analyzed and /// converted into MIR, and then discarded. 
/// /// If you compare `Expr` to the full compiler AST, you will see it is @@ -264,7 +270,7 @@ pub enum ExprKind<'tcx> { }, Adt { adt_def: &'tcx AdtDef, - variant_index: usize, + variant_index: VariantIdx, substs: &'tcx Substs<'tcx>, /// Optional user-given substs: for something like `let x = diff --git a/src/librustc_mir/hair/pattern/_match.rs b/src/librustc_mir/hair/pattern/_match.rs index 77483ad184..fd869d6c33 100644 --- a/src/librustc_mir/hair/pattern/_match.rs +++ b/src/librustc_mir/hair/pattern/_match.rs @@ -55,11 +55,11 @@ /// all the values it covers are already covered by row 2. /// /// To compute `U`, we must have two other concepts. -/// 1. `S(c, P)` is a "specialised matrix", where `c` is a constructor (like `Some` or +/// 1. `S(c, P)` is a "specialized matrix", where `c` is a constructor (like `Some` or /// `None`). You can think of it as filtering `P` to just the rows whose *first* pattern /// can cover `c` (and expanding OR-patterns into distinct patterns), and then expanding /// the constructor into all of its components. -/// The specialisation of a row vector is computed by `specialize`. +/// The specialization of a row vector is computed by `specialize`. /// /// It is computed as follows. For each row `p_i` of P, we have four cases: /// 1.1. `p_(i,1) = c(r_1, .., r_a)`. Then `S(c, P)` has a corresponding row: @@ -179,7 +179,7 @@ use super::{PatternFoldable, PatternFolder, compare_const_vals}; use rustc::hir::def_id::DefId; use rustc::hir::RangeEnd; use rustc::ty::{self, Ty, TyCtxt, TypeFoldable}; -use rustc::ty::layout::{Integer, IntegerExt}; +use rustc::ty::layout::{Integer, IntegerExt, VariantIdx}; use rustc::mir::Field; use rustc::mir::interpret::ConstValue; @@ -422,12 +422,12 @@ pub enum Constructor<'tcx> { } impl<'tcx> Constructor<'tcx> { - fn variant_index_for_adt(&self, adt: &'tcx ty::AdtDef) -> usize { + fn variant_index_for_adt(&self, adt: &'tcx ty::AdtDef) -> VariantIdx { match self { &Variant(vid) => adt.variant_index_with_id(vid), &Single => { assert!(!adt.is_enum()); - 0 + VariantIdx::new(0) } _ => bug!("bad constructor {:?} for adt {:?}", self, adt) } @@ -669,14 +669,14 @@ fn all_constructors<'a, 'tcx: 'a>(cx: &mut MatchCheckCtxt<'a, 'tcx>, } ty::Int(ity) if exhaustive_integer_patterns => { // FIXME(49937): refactor these bit manipulations into interpret. - let bits = Integer::from_attr(cx.tcx, SignedInt(ity)).size().bits() as u128; + let bits = Integer::from_attr(&cx.tcx, SignedInt(ity)).size().bits() as u128; let min = 1u128 << (bits - 1); let max = (1u128 << (bits - 1)) - 1; vec![ConstantRange(min, max, pcx.ty, RangeEnd::Included)] } ty::Uint(uty) if exhaustive_integer_patterns => { // FIXME(49937): refactor these bit manipulations into interpret. - let bits = Integer::from_attr(cx.tcx, UnsignedInt(uty)).size().bits() as u128; + let bits = Integer::from_attr(&cx.tcx, UnsignedInt(uty)).size().bits() as u128; let max = !0u128 >> (128 - bits); vec![ConstantRange(0, max, pcx.ty, RangeEnd::Included)] } @@ -862,7 +862,7 @@ impl<'tcx> IntRange<'tcx> { fn signed_bias(tcx: TyCtxt<'_, 'tcx, 'tcx>, ty: Ty<'tcx>) -> u128 { match ty.sty { ty::Int(ity) => { - let bits = Integer::from_attr(tcx, SignedInt(ity)).size().bits() as u128; + let bits = Integer::from_attr(&tcx, SignedInt(ity)).size().bits() as u128; 1u128 << (bits - 1) } _ => 0 @@ -931,12 +931,37 @@ impl<'tcx> IntRange<'tcx> { } } -// Return a set of constructors equivalent to `all_ctors \ used_ctors`. 
+// A request for missing constructor data in terms of either: +// - whether or not there are any missing constructors; or +// - the actual set of missing constructors. +#[derive(PartialEq)] +enum MissingCtorsInfo { + Emptiness, + Ctors, +} + +// Used by `compute_missing_ctors`. +#[derive(Debug, PartialEq)] +enum MissingCtors<'tcx> { + Empty, + NonEmpty, + + // Note that the Vec can be empty. + Ctors(Vec<Constructor<'tcx>>), +} + +// When `info` is `MissingCtorsInfo::Ctors`, compute a set of constructors +// equivalent to `all_ctors \ used_ctors`. When `info` is +// `MissingCtorsInfo::Emptiness`, just determines if that set is empty or not. +// (The split logic gives a performance win, because we always need to know if +// the set is empty, but we rarely need the full set, and it can be expensive +// to compute the full set.) fn compute_missing_ctors<'a, 'tcx: 'a>( + info: MissingCtorsInfo, tcx: TyCtxt<'a, 'tcx, 'tcx>, all_ctors: &Vec<Constructor<'tcx>>, used_ctors: &Vec<Constructor<'tcx>>, -) -> Vec<Constructor<'tcx>> { +) -> MissingCtors<'tcx> { let mut missing_ctors = vec![]; for req_ctor in all_ctors { @@ -965,10 +990,22 @@ fn compute_missing_ctors<'a, 'tcx: 'a>( // We add `refined_ctors` instead of `req_ctor`, because then we can // provide more detailed error information about precisely which // ranges have been omitted. - missing_ctors.extend(refined_ctors); + if info == MissingCtorsInfo::Emptiness { + if !refined_ctors.is_empty() { + // The set is non-empty; return early. + return MissingCtors::NonEmpty; + } + } else { + missing_ctors.extend(refined_ctors); + } } - missing_ctors + if info == MissingCtorsInfo::Emptiness { + // If we reached here, the set is empty. + MissingCtors::Empty + } else { + MissingCtors::Ctors(missing_ctors) + } } /// Algorithm from http://moscova.inria.fr/~maranget/papers/warn/index.html @@ -1081,20 +1118,23 @@ pub fn is_useful<'p, 'a: 'p, 'tcx: 'a>(cx: &mut MatchCheckCtxt<'a, 'tcx>, // feature flag is not present, so this is only // needed for that case. - // Find those constructors that are not matched by any non-wildcard patterns in the - // current column. - let missing_ctors = compute_missing_ctors(cx.tcx, &all_ctors, &used_ctors); + // Missing constructors are those that are not matched by any + // non-wildcard patterns in the current column. We always determine if + // the set is empty, but we only fully construct them on-demand, + // because they're rarely used and can be big. + let cheap_missing_ctors = + compute_missing_ctors(MissingCtorsInfo::Emptiness, cx.tcx, &all_ctors, &used_ctors); let is_privately_empty = all_ctors.is_empty() && !cx.is_uninhabited(pcx.ty); let is_declared_nonexhaustive = cx.is_non_exhaustive_enum(pcx.ty) && !cx.is_local(pcx.ty); - debug!("missing_ctors={:#?} is_privately_empty={:#?} is_declared_nonexhaustive={:#?}", - missing_ctors, is_privately_empty, is_declared_nonexhaustive); + debug!("cheap_missing_ctors={:#?} is_privately_empty={:#?} is_declared_nonexhaustive={:#?}", + cheap_missing_ctors, is_privately_empty, is_declared_nonexhaustive); // For privately empty and non-exhaustive enums, we work as if there were an "extra" // `_` constructor for the type, so we can never match over all constructors.
let is_non_exhaustive = is_privately_empty || is_declared_nonexhaustive; - if missing_ctors.is_empty() && !is_non_exhaustive { + if cheap_missing_ctors == MissingCtors::Empty && !is_non_exhaustive { split_grouped_constructors(cx.tcx, all_ctors, matrix, pcx.ty).into_iter().map(|c| { is_useful_specialized(cx, matrix, v, c, pcx.ty, witness) }).find(|result| result.is_useful()).unwrap_or(NotUseful) @@ -1165,15 +1205,22 @@ pub fn is_useful<'p, 'a: 'p, 'tcx: 'a>(cx: &mut MatchCheckCtxt<'a, 'tcx>, witness }).collect() } else { - pats.into_iter().flat_map(|witness| { - missing_ctors.iter().map(move |ctor| { - // Extends the witness with a "wild" version of this - // constructor, that matches everything that can be built with - // it. For example, if `ctor` is a `Constructor::Variant` for - // `Option::Some`, this pushes the witness for `Some(_)`. - witness.clone().push_wild_constructor(cx, ctor, pcx.ty) - }) - }).collect() + let expensive_missing_ctors = + compute_missing_ctors(MissingCtorsInfo::Ctors, cx.tcx, &all_ctors, + &used_ctors); + if let MissingCtors::Ctors(missing_ctors) = expensive_missing_ctors { + pats.into_iter().flat_map(|witness| { + missing_ctors.iter().map(move |ctor| { + // Extends the witness with a "wild" version of this + // constructor, that matches everything that can be built with + // it. For example, if `ctor` is a `Constructor::Variant` for + // `Option::Some`, this pushes the witness for `Some(_)`. + witness.clone().push_wild_constructor(cx, ctor, pcx.ty) + }) + }).collect() + } else { + bug!("cheap missing ctors") + } }; UsefulWithWitness(new_witnesses) } @@ -1406,7 +1453,7 @@ fn should_treat_range_exhaustively(tcx: TyCtxt<'_, 'tcx, 'tcx>, ctor: &Construct /// mean creating a separate constructor for every single value in the range, which is clearly /// impractical. However, observe that for some ranges of integers, the specialisation will be /// identical across all values in that range (i.e. there are equivalence classes of ranges of -/// constructors based on their `is_useful_specialised` outcome). These classes are grouped by +/// constructors based on their `is_useful_specialized` outcome). These classes are grouped by /// the patterns that apply to them (in the matrix `P`). We can split the range whenever the /// patterns that apply to that range (specifically: the patterns that *intersect* with that range) /// change. 
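The `compute_missing_ctors` split in the `_match.rs` hunks above is easier to see outside the compiler. The following stand-alone sketch is not part of the patch; the names `MissingInfo`, `Missing`, and `compute_missing` are illustrative, and plain integers stand in for pattern constructors. It shows the same "answer the cheap emptiness question early, build the full set only on demand" pattern:

```
// Sketch of the emptiness-vs-full-set split: callers that only need to know
// whether anything is missing stop at the first hit instead of building the
// whole (potentially large) vector.
#[derive(PartialEq)]
enum MissingInfo { Emptiness, Ctors }

#[derive(Debug, PartialEq)]
enum Missing { Empty, NonEmpty, Ctors(Vec<u32>) }

fn compute_missing(info: MissingInfo, all: &[u32], used: &[u32]) -> Missing {
    let mut missing = Vec::new();
    for ctor in all {
        if !used.contains(ctor) {
            if info == MissingInfo::Emptiness {
                // The caller only cares that the set is non-empty; return early.
                return Missing::NonEmpty;
            }
            missing.push(*ctor);
        }
    }
    if info == MissingInfo::Emptiness { Missing::Empty } else { Missing::Ctors(missing) }
}

fn main() {
    // Cheap query: is anything missing at all?
    assert_eq!(compute_missing(MissingInfo::Emptiness, &[1, 2, 3], &[1, 3]), Missing::NonEmpty);
    // Expensive query: which constructors exactly?
    assert_eq!(compute_missing(MissingInfo::Ctors, &[1, 2, 3], &[1, 3]), Missing::Ctors(vec![2]));
}
```

The payoff is the early return on the common path: `is_useful` always needs the emptiness answer, but only rarely needs the full list of missing constructors, so the potentially large vector is usually never built.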
diff --git a/src/librustc_mir/hair/pattern/check_match.rs b/src/librustc_mir/hair/pattern/check_match.rs index 5282666165..a6bd36e582 100644 --- a/src/librustc_mir/hair/pattern/check_match.rs +++ b/src/librustc_mir/hair/pattern/check_match.rs @@ -52,7 +52,7 @@ impl<'a, 'tcx> Visitor<'tcx> for OuterVisitor<'a, 'tcx> { } pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { - tcx.hir.krate().visit_all_item_likes(&mut OuterVisitor { tcx: tcx }.as_deep_visitor()); + tcx.hir.krate().visit_all_item_likes(&mut OuterVisitor { tcx }.as_deep_visitor()); tcx.sess.abort_if_errors(); } @@ -234,12 +234,12 @@ impl<'a, 'tcx> MatchVisitor<'a, 'tcx> { if !scrutinee_is_uninhabited { // We know the type is inhabited, so this must be wrong let mut err = create_e0004(self.tcx.sess, scrut.span, - format!("non-exhaustive patterns: type {} \ + format!("non-exhaustive patterns: type `{}` \ is non-empty", pat_ty)); span_help!(&mut err, scrut.span, - "Please ensure that all possible cases are being handled; \ - possibly adding wildcards or more match arms."); + "ensure that all possible cases are being handled, \ + possibly by adding wildcards or more match arms"); err.emit(); } // If the type *is* uninhabited, it's vacuously exhaustive diff --git a/src/librustc_mir/hair/pattern/mod.rs b/src/librustc_mir/hair/pattern/mod.rs index f70e93182c..61d8297fec 100644 --- a/src/librustc_mir/hair/pattern/mod.rs +++ b/src/librustc_mir/hair/pattern/mod.rs @@ -19,12 +19,14 @@ pub(crate) use self::check_match::check_match; use const_eval::{const_field, const_variant_index}; use hair::util::UserAnnotatedTyHelpers; +use hair::constant::*; use rustc::mir::{fmt_const_val, Field, BorrowKind, Mutability}; use rustc::mir::{ProjectionElem, UserTypeAnnotation, UserTypeProjection, UserTypeProjections}; use rustc::mir::interpret::{Scalar, GlobalId, ConstValue, sign_extend}; use rustc::ty::{self, Region, TyCtxt, AdtDef, Ty}; use rustc::ty::subst::{Substs, Kind}; +use rustc::ty::layout::VariantIdx; use rustc::hir::{self, PatKind, RangeEnd}; use rustc::hir::def::{Def, CtorKind}; use rustc::hir::pat_util::EnumerateAndAdjustIterator; @@ -36,7 +38,6 @@ use std::fmt; use syntax::ast; use syntax::ptr::P; use syntax_pos::Span; -use syntax_pos::symbol::Symbol; #[derive(Clone, Debug)] pub enum PatternError { @@ -111,7 +112,7 @@ impl<'tcx> PatternTypeProjections<'tcx> { pub(crate) fn variant(&self, adt_def: &'tcx AdtDef, - variant_index: usize, + variant_index: VariantIdx, field: Field) -> Self { self.map_projs(|pat_ty_proj| pat_ty_proj.variant(adt_def, variant_index, field)) } @@ -153,7 +154,7 @@ impl<'tcx> PatternTypeProjection<'tcx> { pub(crate) fn variant(&self, adt_def: &'tcx AdtDef, - variant_index: usize, + variant_index: VariantIdx, field: Field) -> Self { let mut new = self.clone(); new.0.projs.push(ProjectionElem::Downcast(adt_def, variant_index)); @@ -200,7 +201,7 @@ pub enum PatternKind<'tcx> { Variant { adt_def: &'tcx AdtDef, substs: &'tcx Substs<'tcx>, - variant_index: usize, + variant_index: VariantIdx, subpatterns: Vec>, }, @@ -273,7 +274,7 @@ impl<'tcx> fmt::Display for Pattern<'tcx> { } _ => if let ty::Adt(adt, _) = self.ty.sty { if !adt.is_enum() { - Some(&adt.variants[0]) + Some(&adt.variants[VariantIdx::new(0)]) } else { None } @@ -571,7 +572,7 @@ impl<'a, 'tcx> PatternContext<'a, 'tcx> { }) .collect(); - PatternKind::Leaf { subpatterns: subpatterns } + PatternKind::Leaf { subpatterns } } ty::Error => { // Avoid ICE (#50577) return Pattern { span: pat.span, ty, kind: Box::new(PatternKind::Wild) }; @@ -778,13 +779,13 @@ 
impl<'a, 'tcx> PatternContext<'a, 'tcx> { subpatterns, } } else { - PatternKind::Leaf { subpatterns: subpatterns } + PatternKind::Leaf { subpatterns } } } Def::Struct(..) | Def::StructCtor(..) | Def::Union(..) | Def::TyAlias(..) | Def::AssociatedTy(..) | Def::SelfTy(..) | Def::SelfCtor(..) => { - PatternKind::Leaf { subpatterns: subpatterns } + PatternKind::Leaf { subpatterns } } _ => { @@ -845,13 +846,13 @@ impl<'a, 'tcx> PatternContext<'a, 'tcx> { Ok(value) => { return self.const_to_pat(instance, value, id, span) }, - Err(err) => { - err.report_as_error( - self.tcx.at(span), + Err(_) => { + self.tcx.sess.span_err( + span, "could not evaluate constant pattern", ); PatternKind::Wild - }, + } } }, None => { @@ -890,12 +891,11 @@ impl<'a, 'tcx> PatternContext<'a, 'tcx> { ); *self.const_to_pat(instance, val, expr.hir_id, lit.span).kind }, - Err(e) => { - if e == LitToConstError::UnparseableFloat { - self.errors.push(PatternError::FloatBug); - } + Err(LitToConstError::UnparseableFloat) => { + self.errors.push(PatternError::FloatBug); PatternKind::Wild }, + Err(LitToConstError::Reported) => PatternKind::Wild, } }, hir::ExprKind::Path(ref qpath) => *self.lower_path(qpath, expr.hir_id, expr.span).kind, @@ -913,12 +913,11 @@ impl<'a, 'tcx> PatternContext<'a, 'tcx> { ); *self.const_to_pat(instance, val, expr.hir_id, lit.span).kind }, - Err(e) => { - if e == LitToConstError::UnparseableFloat { - self.errors.push(PatternError::FloatBug); - } + Err(LitToConstError::UnparseableFloat) => { + self.errors.push(PatternError::FloatBug); PatternKind::Wild }, + Err(LitToConstError::Reported) => PatternKind::Wild, } } _ => span_bug!(expr.span, "not a literal: {:?}", expr), @@ -1160,7 +1159,7 @@ impl<'tcx> PatternFoldable<'tcx> for PatternKind<'tcx> { } => PatternKind::Variant { adt_def: adt_def.fold_with(folder), substs: substs.fold_with(folder), - variant_index: variant_index.fold_with(folder), + variant_index, subpatterns: subpatterns.fold_with(folder) }, PatternKind::Leaf { @@ -1293,124 +1292,3 @@ pub fn compare_const_vals<'a, 'tcx>( fallback() } - -#[derive(PartialEq)] -enum LitToConstError { - UnparseableFloat, - Propagated, -} - -// FIXME: Combine with rustc_mir::hair::cx::const_eval_literal -fn lit_to_const<'a, 'tcx>(lit: &'tcx ast::LitKind, - tcx: TyCtxt<'a, 'tcx, 'tcx>, - ty: Ty<'tcx>, - neg: bool) - -> Result<&'tcx ty::Const<'tcx>, LitToConstError> { - use syntax::ast::*; - - use rustc::mir::interpret::*; - let lit = match *lit { - LitKind::Str(ref s, _) => { - let s = s.as_str(); - let id = tcx.allocate_bytes(s.as_bytes()); - ConstValue::new_slice(Scalar::Ptr(id.into()), s.len() as u64, tcx) - }, - LitKind::ByteStr(ref data) => { - let id = tcx.allocate_bytes(data); - ConstValue::Scalar(Scalar::Ptr(id.into())) - }, - LitKind::Byte(n) => ConstValue::Scalar(Scalar::Bits { - bits: n as u128, - size: 1, - }), - LitKind::Int(n, _) => { - enum Int { - Signed(IntTy), - Unsigned(UintTy), - } - let ity = match ty.sty { - ty::Int(IntTy::Isize) => Int::Signed(tcx.sess.target.isize_ty), - ty::Int(other) => Int::Signed(other), - ty::Uint(UintTy::Usize) => Int::Unsigned(tcx.sess.target.usize_ty), - ty::Uint(other) => Int::Unsigned(other), - ty::Error => { // Avoid ICE (#51963) - return Err(LitToConstError::Propagated); - } - _ => bug!("literal integer type with bad type ({:?})", ty.sty), - }; - // This converts from LitKind::Int (which is sign extended) to - // Scalar::Bytes (which is zero extended) - let n = match ity { - // FIXME(oli-obk): are these casts correct? 
- Int::Signed(IntTy::I8) if neg => - (n as i8).overflowing_neg().0 as u8 as u128, - Int::Signed(IntTy::I16) if neg => - (n as i16).overflowing_neg().0 as u16 as u128, - Int::Signed(IntTy::I32) if neg => - (n as i32).overflowing_neg().0 as u32 as u128, - Int::Signed(IntTy::I64) if neg => - (n as i64).overflowing_neg().0 as u64 as u128, - Int::Signed(IntTy::I128) if neg => - (n as i128).overflowing_neg().0 as u128, - Int::Signed(IntTy::I8) | Int::Unsigned(UintTy::U8) => n as u8 as u128, - Int::Signed(IntTy::I16) | Int::Unsigned(UintTy::U16) => n as u16 as u128, - Int::Signed(IntTy::I32) | Int::Unsigned(UintTy::U32) => n as u32 as u128, - Int::Signed(IntTy::I64) | Int::Unsigned(UintTy::U64) => n as u64 as u128, - Int::Signed(IntTy::I128)| Int::Unsigned(UintTy::U128) => n, - _ => bug!(), - }; - let size = tcx.layout_of(ty::ParamEnv::empty().and(ty)).unwrap().size.bytes() as u8; - ConstValue::Scalar(Scalar::Bits { - bits: n, - size, - }) - }, - LitKind::Float(n, fty) => { - parse_float(n, fty, neg).map_err(|_| LitToConstError::UnparseableFloat)? - } - LitKind::FloatUnsuffixed(n) => { - let fty = match ty.sty { - ty::Float(fty) => fty, - _ => bug!() - }; - parse_float(n, fty, neg).map_err(|_| LitToConstError::UnparseableFloat)? - } - LitKind::Bool(b) => ConstValue::Scalar(Scalar::from_bool(b)), - LitKind::Char(c) => ConstValue::Scalar(Scalar::from_char(c)), - }; - Ok(ty::Const::from_const_value(tcx, lit, ty)) -} - -pub fn parse_float<'tcx>( - num: Symbol, - fty: ast::FloatTy, - neg: bool, -) -> Result, ()> { - let num = num.as_str(); - use rustc_apfloat::ieee::{Single, Double}; - use rustc_apfloat::Float; - let (bits, size) = match fty { - ast::FloatTy::F32 => { - num.parse::().map_err(|_| ())?; - let mut f = num.parse::().unwrap_or_else(|e| { - panic!("apfloat::ieee::Single failed to parse `{}`: {:?}", num, e) - }); - if neg { - f = -f; - } - (f.to_bits(), 4) - } - ast::FloatTy::F64 => { - num.parse::().map_err(|_| ())?; - let mut f = num.parse::().unwrap_or_else(|e| { - panic!("apfloat::ieee::Single failed to parse `{}`: {:?}", num, e) - }); - if neg { - f = -f; - } - (f.to_bits(), 8) - } - }; - - Ok(ConstValue::Scalar(Scalar::Bits { bits, size })) -} diff --git a/src/librustc_mir/interpret/cast.rs b/src/librustc_mir/interpret/cast.rs index 81e7a6e437..7d636b77ce 100644 --- a/src/librustc_mir/interpret/cast.rs +++ b/src/librustc_mir/interpret/cast.rs @@ -19,7 +19,7 @@ use rustc::mir::interpret::{ use rustc::mir::CastKind; use rustc_apfloat::Float; -use super::{EvalContext, Machine, PlaceTy, OpTy, Value}; +use super::{EvalContext, Machine, PlaceTy, OpTy, Immediate}; impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { fn type_is_fat_ptr(&self, ty: Ty<'tcx>) -> bool { @@ -45,30 +45,24 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> Misc => { let src_layout = src.layout; - let src = self.read_value(src)?; + let src = self.read_immediate(src)?; - let src = if M::ENABLE_PTR_TRACKING_HOOKS && src_layout.ty.is_region_ptr() { - // The only `Misc` casts on references are those creating raw pointers. - assert!(dest.layout.ty.is_unsafe_ptr()); - // For the purpose of the "ptr tag hooks", treat this as creating - // a new, raw reference. - let place = self.ref_to_mplace(src)?; - self.create_ref(place, None)? 
- } else { - *src - }; + // There are no casts to references + assert!(!dest.layout.ty.is_region_ptr()); + // Hence we make all casts erase the tag + let src = src.erase_tag().with_default_tag(); if self.type_is_fat_ptr(src_layout.ty) { match (src, self.type_is_fat_ptr(dest.layout.ty)) { // pointers to extern types - (Value::Scalar(_),_) | + (Immediate::Scalar(_),_) | // slices and trait objects to other slices/trait objects - (Value::ScalarPair(..), true) => { - // No change to value - self.write_value(src, dest)?; + (Immediate::ScalarPair(..), true) => { + // No change to immediate + self.write_immediate(src, dest)?; } // slices and trait objects to thin pointers (dropping the metadata) - (Value::ScalarPair(data, _), false) => { + (Immediate::ScalarPair(data, _), false) => { self.write_scalar(data, dest)?; } } @@ -90,8 +84,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> layout::Variants::NicheFilling { .. } => {}, } - let src = src.to_scalar()?; - let dest_val = self.cast_scalar(src, src_layout, dest.layout)?; + let dest_val = self.cast_scalar(src.to_scalar()?, src_layout, dest.layout)?; self.write_scalar(dest_val, dest)?; } } @@ -110,7 +103,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> def_id, substs, ).ok_or_else(|| EvalErrorKind::TooGeneric.into()); - let fn_ptr = self.memory.create_fn_alloc(instance?); + let fn_ptr = self.memory.create_fn_alloc(instance?).with_default_tag(); self.write_scalar(Scalar::Ptr(fn_ptr.into()), dest)?; } ref other => bug!("reify fn pointer on {:?}", other), @@ -118,11 +111,11 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> } UnsafeFnPointer => { - let src = self.read_value(src)?; + let src = self.read_immediate(src)?; match dest.layout.ty.sty { ty::FnPtr(_) => { // No change to value - self.write_value(*src, dest)?; + self.write_immediate(*src, dest)?; } ref other => bug!("fn to unsafe fn cast on {:?}", other), } @@ -143,9 +136,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> substs, ty::ClosureKind::FnOnce, ); - let fn_ptr = self.memory.create_fn_alloc(instance); - let val = Value::Scalar(Scalar::Ptr(fn_ptr.into()).into()); - self.write_value(val, dest)?; + let fn_ptr = self.memory.create_fn_alloc(instance).with_default_tag(); + let val = Immediate::Scalar(Scalar::Ptr(fn_ptr.into()).into()); + self.write_immediate(val, dest)?; } ref other => bug!("closure fn pointer on {:?}", other), } @@ -326,24 +319,28 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> match (&src_pointee_ty.sty, &dest_pointee_ty.sty) { (&ty::Array(_, length), &ty::Slice(_)) => { - let ptr = self.read_value(src)?.to_scalar_ptr()?; + let ptr = self.read_immediate(src)?.to_scalar_ptr()?; // u64 cast is from usize to u64, which is always good - let val = Value::new_slice(ptr, length.unwrap_usize(self.tcx.tcx), self.tcx.tcx); - self.write_value(val, dest) + let val = Immediate::new_slice( + ptr, + length.unwrap_usize(self.tcx.tcx), + self, + ); + self.write_immediate(val, dest) } (&ty::Dynamic(..), &ty::Dynamic(..)) => { // For now, upcasts are limited to changes in marker // traits, and hence never actually require an actual // change to the vtable. 
- let val = self.read_value(src)?; - self.write_value(*val, dest) + let val = self.read_immediate(src)?; + self.write_immediate(*val, dest) } (_, &ty::Dynamic(ref data, _)) => { // Initial cast from sized to dyn trait let vtable = self.get_vtable(src_pointee_ty, data.principal())?; - let ptr = self.read_value(src)?.to_scalar_ptr()?; - let val = Value::new_dyn_trait(ptr, vtable); - self.write_value(val, dest) + let ptr = self.read_immediate(src)?.to_scalar_ptr()?; + let val = Immediate::new_dyn_trait(ptr, vtable); + self.write_immediate(val, dest) } _ => bug!("invalid unsizing {:?} -> {:?}", src.layout.ty, dest.layout.ty), @@ -390,7 +387,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> src_field.into() } Err(..) => { - let src_field_layout = src.layout.field(&self, i)?; + let src_field_layout = src.layout.field(self, i)?; // this must be a field covering the entire thing assert_eq!(src.layout.fields.offset(i).bytes(), 0); assert_eq!(src_field_layout.size, src.layout.size); diff --git a/src/librustc_mir/interpret/eval_context.rs b/src/librustc_mir/interpret/eval_context.rs index 64ad4c2eec..d36d530fe7 100644 --- a/src/librustc_mir/interpret/eval_context.rs +++ b/src/librustc_mir/interpret/eval_context.rs @@ -14,7 +14,6 @@ use std::mem; use syntax::source_map::{self, Span, DUMMY_SP}; use rustc::hir::def_id::DefId; use rustc::hir::def::Def; -use rustc::hir::map::definitions::DefPathData; use rustc::mir; use rustc::ty::layout::{ self, Size, Align, HasDataLayout, LayoutOf, TyLayout @@ -24,6 +23,7 @@ use rustc::ty::{self, Ty, TyCtxt, TypeFoldable}; use rustc::ty::query::TyCtxtAt; use rustc_data_structures::indexed_vec::IndexVec; use rustc::mir::interpret::{ + ErrorHandled, GlobalId, Scalar, FrameInfo, AllocId, EvalResult, EvalErrorKind, truncate, sign_extend, @@ -31,7 +31,7 @@ use rustc::mir::interpret::{ use rustc_data_structures::fx::FxHashMap; use super::{ - Value, Operand, MemPlace, MPlaceTy, Place, PlaceTy, ScalarMaybeUndef, + Immediate, Operand, MemPlace, MPlaceTy, Place, PlaceTy, ScalarMaybeUndef, Memory, Machine }; @@ -46,10 +46,10 @@ pub struct EvalContext<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'a, 'mir, 'tcx>> { pub(crate) param_env: ty::ParamEnv<'tcx>, /// The virtual memory system. - pub memory: Memory<'a, 'mir, 'tcx, M>, + pub(crate) memory: Memory<'a, 'mir, 'tcx, M>, /// The virtual call stack. - pub(crate) stack: Vec>, + pub(crate) stack: Vec>, /// A cache for deduplicating vtables pub(super) vtables: FxHashMap<(Ty<'tcx>, ty::PolyExistentialTraitRef<'tcx>), AllocId>, @@ -57,7 +57,7 @@ pub struct EvalContext<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'a, 'mir, 'tcx>> { /// A stack frame. #[derive(Clone)] -pub struct Frame<'mir, 'tcx: 'mir, Tag=()> { +pub struct Frame<'mir, 'tcx: 'mir, Tag=(), Extra=()> { //////////////////////////////////////////////////////////////////////////////// // Function and callsite information //////////////////////////////////////////////////////////////////////////////// @@ -96,6 +96,9 @@ pub struct Frame<'mir, 'tcx: 'mir, Tag=()> { /// The index of the currently evaluated statement. 
pub stmt: usize, + + /// Extra data for the machine + pub extra: Extra, } #[derive(Clone, Debug, Eq, PartialEq, Hash)] @@ -138,8 +141,8 @@ impl<'tcx, Tag> LocalValue { } } -impl<'b, 'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> HasDataLayout - for &'b EvalContext<'a, 'mir, 'tcx, M> +impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> HasDataLayout + for EvalContext<'a, 'mir, 'tcx, M> { #[inline] fn data_layout(&self) -> &layout::TargetDataLayout { @@ -147,16 +150,7 @@ impl<'b, 'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> HasDataLayout } } -impl<'c, 'b, 'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> HasDataLayout - for &'c &'b mut EvalContext<'a, 'mir, 'tcx, M> -{ - #[inline] - fn data_layout(&self) -> &layout::TargetDataLayout { - &self.tcx.data_layout - } -} - -impl<'b, 'a, 'mir, 'tcx, M> layout::HasTyCtxt<'tcx> for &'b EvalContext<'a, 'mir, 'tcx, M> +impl<'a, 'mir, 'tcx, M> layout::HasTyCtxt<'tcx> for EvalContext<'a, 'mir, 'tcx, M> where M: Machine<'a, 'mir, 'tcx> { #[inline] @@ -165,40 +159,19 @@ impl<'b, 'a, 'mir, 'tcx, M> layout::HasTyCtxt<'tcx> for &'b EvalContext<'a, 'mir } } -impl<'c, 'b, 'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> layout::HasTyCtxt<'tcx> - for &'c &'b mut EvalContext<'a, 'mir, 'tcx, M> -{ - #[inline] - fn tcx<'d>(&'d self) -> TyCtxt<'d, 'tcx, 'tcx> { - *self.tcx - } -} - -impl<'b, 'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> LayoutOf - for &'b EvalContext<'a, 'mir, 'tcx, M> +impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> LayoutOf + for EvalContext<'a, 'mir, 'tcx, M> { type Ty = Ty<'tcx>; type TyLayout = EvalResult<'tcx, TyLayout<'tcx>>; #[inline] - fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout { + fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout { self.tcx.layout_of(self.param_env.and(ty)) .map_err(|layout| EvalErrorKind::Layout(layout).into()) } } -impl<'c, 'b, 'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> LayoutOf - for &'c &'b mut EvalContext<'a, 'mir, 'tcx, M> -{ - type Ty = Ty<'tcx>; - type TyLayout = EvalResult<'tcx, TyLayout<'tcx>>; - - #[inline] - fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout { - (&**self).layout_of(ty) - } -} - impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { pub fn new( tcx: TyCtxtAt<'a, 'tcx, 'tcx>, @@ -226,7 +199,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc } #[inline(always)] - pub fn stack(&self) -> &[Frame<'mir, 'tcx, M::PointerTag>] { + pub fn stack(&self) -> &[Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>] { &self.stack } @@ -237,12 +210,12 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc } #[inline(always)] - pub fn frame(&self) -> &Frame<'mir, 'tcx, M::PointerTag> { + pub fn frame(&self) -> &Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra> { self.stack.last().expect("no call frames exist") } #[inline(always)] - pub fn frame_mut(&mut self) -> &mut Frame<'mir, 'tcx, M::PointerTag> { + pub fn frame_mut(&mut self) -> &mut Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra> { self.stack.last_mut().expect("no call frames exist") } @@ -324,7 +297,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc pub fn layout_of_local( &self, - frame: &Frame<'mir, 'tcx, M::PointerTag>, + frame: &Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>, local: mir::Local ) -> EvalResult<'tcx, TyLayout<'tcx>> { let local_ty = frame.mir.local_decls[local].ty; @@ -332,9 +305,9 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc self.layout_of(local_ty) } - pub fn str_to_value(&mut 
self, s: &str) -> EvalResult<'tcx, Value> { - let ptr = self.memory.allocate_static_bytes(s.as_bytes()); - Ok(Value::new_slice(Scalar::Ptr(ptr), s.len() as u64, self.tcx.tcx)) + pub fn str_to_immediate(&mut self, s: &str) -> EvalResult<'tcx, Immediate> { + let ptr = self.memory.allocate_static_bytes(s.as_bytes()).with_default_tag(); + Ok(Immediate::new_slice(Scalar::Ptr(ptr), s.len() as u64, self)) } /// Return the actual dynamic size and alignment of the place at the given type. @@ -346,7 +319,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc layout: TyLayout<'tcx>, ) -> EvalResult<'tcx, Option<(Size, Align)>> { if !layout.is_unsized() { - return Ok(Some(layout.size_and_align())); + return Ok(Some((layout.size, layout.align.abi))); } match layout.ty.sty { ty::Adt(..) | ty::Tuple(..) => { @@ -358,7 +331,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc trace!("DST layout: {:?}", layout); let sized_size = layout.fields.offset(layout.fields.count() - 1); - let sized_align = layout.align; + let sized_align = layout.align.abi; trace!( "DST {} statically sized prefix size: {:?} align: {:?}", layout.ty, @@ -370,8 +343,21 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc // the last field). Can't have foreign types here, how would we // adjust alignment and size for them? let field = layout.field(self, layout.fields.count() - 1)?; - let (unsized_size, unsized_align) = self.size_and_align_of(metadata, field)? - .expect("Fields cannot be extern types"); + let (unsized_size, unsized_align) = match self.size_and_align_of(metadata, field)? { + Some(size_and_align) => size_and_align, + None => { + // A field with extern type. If this field is at offset 0, we behave + // like the underlying extern type. + // FIXME: Once we have made decisions for how to handle size and alignment + // of `extern type`, this should be adapted. It is just a temporary hack + // to get some code to work that probably ought to work. + if sized_size == Size::ZERO { + return Ok(None) + } else { + bug!("Fields cannot be extern types, unless they are at offset 0") + } + } + }; // FIXME (#26403, #27023): We should be adding padding // to `sized_size` (to accommodate the `unsized_align` @@ -398,7 +384,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc // // `(size + (align-1)) & -align` - Ok(Some((size.abi_align(align), align))) + Ok(Some((size.align_to(align), align))) } ty::Dynamic(..) 
=> { let vtable = metadata.expect("dyn trait fat ptr must have vtable").to_ptr()?; @@ -408,8 +394,8 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc ty::Slice(_) | ty::Str => { let len = metadata.expect("slice fat ptr must have vtable").to_usize(self)?; - let (elem_size, align) = layout.field(self, 0)?.size_and_align(); - Ok(Some((elem_size * len, align))) + let elem = layout.field(self, 0)?; + Ok(Some((elem.size * len, elem.align.abi))) } ty::Foreign(_) => { @@ -441,6 +427,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc ::log_settings::settings().indentation += 1; // first push a stack frame so we have access to the local substs + let extra = M::stack_push(self)?; self.stack.push(Frame { mir, block: mir::START_BLOCK, @@ -452,15 +439,16 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc span, instance, stmt: 0, + extra, }); // don't allocate at all for trivial constants if mir.local_decls.len() > 1 { - // We put some marker value into the locals that we later want to initialize. + // We put some marker immediate into the locals that we later want to initialize. // This can be anything except for LocalValue::Dead -- because *that* is the // value we use for things that we know are initially dead. let dummy = - LocalValue::Live(Operand::Immediate(Value::Scalar(ScalarMaybeUndef::Undef))); + LocalValue::Live(Operand::Immediate(Immediate::Scalar(ScalarMaybeUndef::Undef))); let mut locals = IndexVec::from_elem(dummy, &mir.local_decls); // Return place is handled specially by the `eval_place` functions, and the // entry in `locals` should never be used. Make it dead, to be sure. @@ -521,15 +509,15 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc let frame = self.stack.pop().expect( "tried to pop a stack frame, but there were none", ); + M::stack_pop(self, frame.extra)?; + // Abort early if we do not want to clean up: We also avoid validation in that case, + // because this is CTFE and the final value will be thoroughly validated anyway. match frame.return_to_block { - StackPopCleanup::Goto(block) => { - self.goto_block(block)?; - } + StackPopCleanup::Goto(_) => {}, StackPopCleanup::None { cleanup } => { if !cleanup { - // Leak the locals. Also skip validation, this is only used by - // static/const computation which does its own (stronger) final - // validation. + assert!(self.stack.is_empty(), "only the topmost frame should ever be leaked"); + // Leak the locals, skip validation. return Ok(()); } } @@ -538,7 +526,8 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc for local in frame.locals { self.deallocate_local(local)?; } - // Validate the return value. + // Validate the return value. Do this after deallocating so that we catch dangling + // references. if let Some(return_place) = frame.return_place { if M::enforce_validity(self) { // Data got changed, better make sure it matches the type! @@ -550,7 +539,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc // return place is always a local and then this cannot happen. self.validate_operand( self.place_to_op(return_place)?, - &mut vec![], + vec![], None, /*const_mode*/false, )?; @@ -559,6 +548,13 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc // Uh, that shouldn't happen... 
the function did not intend to return return err!(Unreachable); } + // Jump to new block -- *after* validation so that the spans make more sense. + match frame.return_to_block { + StackPopCleanup::Goto(block) => { + self.goto_block(block)?; + } + StackPopCleanup::None { .. } => {} + } if self.stack.len() > 1 { // FIXME should be "> 0", printing topmost frame crashes rustc... debug!("CONTINUING({}) {}", self.cur_frame(), self.frame().instance); @@ -605,14 +601,26 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc Ok(()) } - pub fn const_eval(&self, gid: GlobalId<'tcx>) -> EvalResult<'tcx, &'tcx ty::Const<'tcx>> { + pub fn const_eval_raw( + &self, + gid: GlobalId<'tcx>, + ) -> EvalResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { let param_env = if self.tcx.is_static(gid.instance.def_id()).is_some() { ty::ParamEnv::reveal_all() } else { self.param_env }; - self.tcx.const_eval(param_env.and(gid)) - .map_err(|err| EvalErrorKind::ReferencedConstant(err).into()) + // We use `const_eval_raw` here, and get an unvalidated result. That is okay: + // Our result will later be validated anyway, and there seems no good reason + // to have to fail early here. This is also more consistent with + // `Memory::get_static_alloc` which has to use `const_eval_raw` to avoid cycles. + let val = self.tcx.const_eval_raw(param_env.and(gid)).map_err(|err| { + match err { + ErrorHandled::Reported => EvalErrorKind::ReferencedConstant, + ErrorHandled::TooGeneric => EvalErrorKind::TooGeneric, + } + })?; + self.raw_const_to_mplace(val) } pub fn dump_place(&self, place: Place) { @@ -641,19 +649,19 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc let (ptr, align) = mplace.to_scalar_ptr_align(); match ptr { Scalar::Ptr(ptr) => { - write!(msg, " by align({}) ref:", align.abi()).unwrap(); + write!(msg, " by align({}) ref:", align.bytes()).unwrap(); allocs.push(ptr.alloc_id); } ptr => write!(msg, " by integral ref: {:?}", ptr).unwrap(), } } - Ok(Operand::Immediate(Value::Scalar(val))) => { + Ok(Operand::Immediate(Immediate::Scalar(val))) => { write!(msg, " {:?}", val).unwrap(); if let ScalarMaybeUndef::Scalar(Scalar::Ptr(ptr)) = val { allocs.push(ptr.alloc_id); } } - Ok(Operand::Immediate(Value::ScalarPair(val1, val2))) => { + Ok(Operand::Immediate(Immediate::ScalarPair(val1, val2))) => { write!(msg, " ({:?}, {:?})", val1, val2).unwrap(); if let ScalarMaybeUndef::Scalar(Scalar::Ptr(ptr)) = val1 { allocs.push(ptr.alloc_id); @@ -670,7 +678,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc Place::Ptr(mplace) => { match mplace.ptr { Scalar::Ptr(ptr) => { - trace!("by align({}) ref:", mplace.align.abi()); + trace!("by align({}) ref:", mplace.align.bytes()); self.memory.dump_alloc(ptr.alloc_id); } ptr => trace!(" integral by ref: {:?}", ptr), @@ -679,11 +687,10 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc } } - pub fn generate_stacktrace(&self, explicit_span: Option) -> (Vec, Span) { + pub fn generate_stacktrace(&self, explicit_span: Option) -> Vec> { let mut last_span = None; let mut frames = Vec::new(); - // skip 1 because the last frame is just the environment of the constant - for &Frame { instance, span, mir, block, stmt, .. } in self.stack().iter().skip(1).rev() { + for &Frame { instance, span, mir, block, stmt, .. 
} in self.stack().iter().rev() { // make sure we don't emit frames that are duplicates of the previous if explicit_span == Some(span) { last_span = Some(span); @@ -696,13 +703,6 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc } else { last_span = Some(span); } - let location = if self.tcx.def_key(instance.def_id()).disambiguated_data.data - == DefPathData::ClosureExpr - { - "closure".to_owned() - } else { - instance.to_string() - }; let block = &mir.basic_blocks()[block]; let source_info = if stmt < block.statements.len() { block.statements[stmt].source_info @@ -713,10 +713,10 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc mir::ClearCrossCrate::Set(ref ivs) => Some(ivs[source_info.scope].lint_root), mir::ClearCrossCrate::Clear => None, }; - frames.push(FrameInfo { span, location, lint_root }); + frames.push(FrameInfo { call_site: span, instance, lint_root }); } trace!("generate stacktrace: {:#?}, {:?}", frames, explicit_span); - (frames, self.tcx.span) + frames } #[inline(always)] diff --git a/src/librustc_mir/interpret/intrinsics.rs b/src/librustc_mir/interpret/intrinsics.rs index 5fa0fef369..bbee6e0b49 100644 --- a/src/librustc_mir/interpret/intrinsics.rs +++ b/src/librustc_mir/interpret/intrinsics.rs @@ -60,7 +60,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> match intrinsic_name { "min_align_of" => { let elem_ty = substs.type_at(0); - let elem_align = self.layout_of(elem_ty)?.align.abi(); + let elem_align = self.layout_of(elem_ty)?.align.abi.bytes(); let align_val = Scalar::from_uint(elem_align, dest.layout.size); self.write_scalar(align_val, dest)?; } @@ -115,8 +115,8 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> | "add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" => { - let lhs = self.read_value(args[0])?; - let rhs = self.read_value(args[1])?; + let lhs = self.read_immediate(args[0])?; + let rhs = self.read_immediate(args[1])?; let (bin_op, ignore_overflow) = match intrinsic_name { "overflowing_add" => (BinOp::Add, true), "overflowing_sub" => (BinOp::Sub, true), @@ -133,14 +133,14 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> } } "unchecked_shl" | "unchecked_shr" => { - let l = self.read_value(args[0])?; - let r = self.read_value(args[1])?; + let l = self.read_immediate(args[0])?; + let r = self.read_immediate(args[1])?; let bin_op = match intrinsic_name { "unchecked_shl" => BinOp::Shl, "unchecked_shr" => BinOp::Shr, _ => bug!("Already checked for int ops") }; - let (val, overflowed) = self.binary_op_val(bin_op, l, r)?; + let (val, overflowed) = self.binary_op_imm(bin_op, l, r)?; if overflowed { let layout = self.layout_of(substs.type_at(0))?; let r_val = r.to_scalar()?.to_bits(layout.size)?; @@ -150,6 +150,24 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> } self.write_scalar(val, dest)?; } + "rotate_left" | "rotate_right" => { + // rotate_left: (X << (S % BW)) | (X >> ((BW - S) % BW)) + // rotate_right: (X << ((BW - S) % BW)) | (X >> (S % BW)) + let layout = self.layout_of(substs.type_at(0))?; + let val_bits = self.read_scalar(args[0])?.to_bits(layout.size)?; + let raw_shift_bits = self.read_scalar(args[1])?.to_bits(layout.size)?; + let width_bits = layout.size.bits() as u128; + let shift_bits = raw_shift_bits % width_bits; + let inv_shift_bits = (width_bits - raw_shift_bits) % width_bits; + let result_bits = if intrinsic_name == "rotate_left" { + 
(val_bits << shift_bits) | (val_bits >> inv_shift_bits) + } else { + (val_bits >> shift_bits) | (val_bits << inv_shift_bits) + }; + let truncated_bits = self.truncate(result_bits, layout); + let result = Scalar::from_uint(truncated_bits, layout.size); + self.write_scalar(result, dest)?; + } "transmute" => { self.copy_op_transmute(args[0], dest)?; } @@ -172,8 +190,8 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> // Some fn calls are actually BinOp intrinsics if let Some((op, oflo)) = self.tcx.is_binop_lang_item(def_id) { let dest = dest.expect("128 lowerings can't diverge"); - let l = self.read_value(args[0])?; - let r = self.read_value(args[1])?; + let l = self.read_immediate(args[0])?; + let r = self.read_immediate(args[1])?; if oflo { self.binop_with_overflow(op, l, r, dest)?; } else { @@ -183,8 +201,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> } else if Some(def_id) == self.tcx.lang_items().panic_fn() { assert!(args.len() == 1); // &(&'static str, &'static str, u32, u32) - let ptr = self.read_value(args[0])?; - let place = self.ref_to_mplace(ptr)?; + let place = self.deref_operand(args[0])?; let (msg, file, line, col) = ( self.mplace_field(place, 0)?, self.mplace_field(place, 1)?, @@ -192,9 +209,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> self.mplace_field(place, 3)?, ); - let msg_place = self.ref_to_mplace(self.read_value(msg.into())?)?; + let msg_place = self.deref_operand(msg.into())?; let msg = Symbol::intern(self.read_str(msg_place)?); - let file_place = self.ref_to_mplace(self.read_value(file.into())?)?; + let file_place = self.deref_operand(file.into())?; let file = Symbol::intern(self.read_str(file_place)?); let line = self.read_scalar(line.into())?.to_u32()?; let col = self.read_scalar(col.into())?.to_u32()?; @@ -203,17 +220,16 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> assert!(args.len() == 2); // &'static str, &(&'static str, u32, u32) let msg = args[0]; - let ptr = self.read_value(args[1])?; - let place = self.ref_to_mplace(ptr)?; + let place = self.deref_operand(args[1])?; let (file, line, col) = ( self.mplace_field(place, 0)?, self.mplace_field(place, 1)?, self.mplace_field(place, 2)?, ); - let msg_place = self.ref_to_mplace(self.read_value(msg.into())?)?; + let msg_place = self.deref_operand(msg.into())?; let msg = Symbol::intern(self.read_str(msg_place)?); - let file_place = self.ref_to_mplace(self.read_value(file.into())?)?; + let file_place = self.deref_operand(file.into())?; let file = Symbol::intern(self.read_str(file_place)?); let line = self.read_scalar(line.into())?.to_u32()?; let col = self.read_scalar(col.into())?.to_u32()?; diff --git a/src/librustc_mir/interpret/machine.rs b/src/librustc_mir/interpret/machine.rs index 1318bbe1c2..d7a3a27bbe 100644 --- a/src/librustc_mir/interpret/machine.rs +++ b/src/librustc_mir/interpret/machine.rs @@ -15,22 +15,15 @@ use std::borrow::{Borrow, Cow}; use std::hash::Hash; -use rustc::hir::def_id::DefId; +use rustc::hir::{self, def_id::DefId}; use rustc::mir; -use rustc::ty::{self, Ty, layout::{Size, TyLayout}, query::TyCtxtAt}; +use rustc::ty::{self, layout::TyLayout, query::TyCtxtAt}; use super::{ - Allocation, AllocId, EvalResult, Scalar, - EvalContext, PlaceTy, OpTy, Pointer, MemoryKind, + Allocation, AllocId, EvalResult, Scalar, AllocationExtra, + EvalContext, PlaceTy, MPlaceTy, OpTy, Pointer, MemoryKind, }; -/// Classifying memory accesses -#[derive(Clone, Copy, 
Debug, PartialEq, Eq)] -pub enum MemoryAccess { - Read, - Write, -} - /// Whether this kind of memory is allowed to leak pub trait MayLeak: Copy { fn may_leak(self) -> bool; @@ -81,10 +74,19 @@ pub trait Machine<'a, 'mir, 'tcx>: Sized { /// Tag tracked alongside every pointer. This is used to implement "Stacked Borrows" /// . + /// The `default()` is used for pointers to consts, statics, vtables and functions. type PointerTag: ::std::fmt::Debug + Default + Copy + Eq + Hash + 'static; + /// Extra data stored in every call frame. + type FrameExtra; + + /// Extra data stored in memory. A reference to this is available when `AllocExtra` + /// gets initialized, so you can e.g. have an `Rc` here if there is global state you + /// need access to in the `AllocExtra` hooks. + type MemoryExtra: Default; + /// Extra data stored in every allocation. - type AllocExtra: ::std::fmt::Debug + Default + Clone; + type AllocExtra: AllocationExtra; /// Memory's allocation map type MemoryMap: @@ -95,17 +97,13 @@ pub trait Machine<'a, 'mir, 'tcx>: Sized { Default + Clone; - /// The memory kind to use for copied statics -- or None if those are not supported. + /// The memory kind to use for copied statics -- or None if statics should not be mutated + /// and thus any such attempt will cause a `ModifiedStatic` error to be raised. /// Statics are copied under two circumstances: When they are mutated, and when /// `static_with_default_tag` or `find_foreign_static` (see below) returns an owned allocation /// that is added to the memory so that the work is not done twice. const STATIC_KIND: Option; - /// As an optimization, you can prevent the pointer tracking hooks from ever being - /// called. You should only do this if you do not care about provenance tracking. - /// This controls the `tag_reference` and `tag_dereference` hooks. - const ENABLE_PTR_TRACKING_HOOKS: bool; - /// Whether to enforce the validity invariant fn enforce_validity(ecx: &EvalContext<'a, 'mir, 'tcx, Self>) -> bool; @@ -146,20 +144,22 @@ pub trait Machine<'a, 'mir, 'tcx>: Sized { /// the machine memory. (This relies on `AllocMap::get_or` being able to add the /// owned allocation to the map even when the map is shared.) fn find_foreign_static( - tcx: TyCtxtAt<'a, 'tcx, 'tcx>, def_id: DefId, + tcx: TyCtxtAt<'a, 'tcx, 'tcx>, + memory_extra: &Self::MemoryExtra, ) -> EvalResult<'tcx, Cow<'tcx, Allocation>>; /// Called to turn an allocation obtained from the `tcx` into one that has - /// the appropriate tags on each pointer. + /// the right type for this machine. /// /// This should avoid copying if no work has to be done! If this returns an owned - /// allocation (because a copy had to be done to add the tags), machine memory will + /// allocation (because a copy had to be done to add tags or metadata), machine memory will /// cache the result. (This relies on `AllocMap::get_or` being able to add the /// owned allocation to the map even when the map is shared.) - fn static_with_default_tag( - alloc: &'_ Allocation - ) -> Cow<'_, Allocation>; + fn adjust_static_allocation<'b>( + alloc: &'b Allocation, + memory_extra: &Self::MemoryExtra, + ) -> Cow<'b, Allocation>; /// Called for all binary operations on integer(-like) types when one operand is a pointer /// value, and for the `Offset` operation that is inherently about pointers. @@ -180,56 +180,52 @@ pub trait Machine<'a, 'mir, 'tcx>: Sized { dest: PlaceTy<'tcx, Self::PointerTag>, ) -> EvalResult<'tcx>; - /// Hook for performing extra checks on a memory access. 
- /// - /// Takes read-only access to the allocation so we can keep all the memory read - /// operations take `&self`. Use a `RefCell` in `AllocExtra` if you - /// need to mutate. - #[inline] - fn memory_accessed( - _alloc: &Allocation, - _ptr: Pointer, - _size: Size, - _access: MemoryAccess, - ) -> EvalResult<'tcx> { - Ok(()) - } - - /// Hook for performing extra checks when memory gets deallocated. - #[inline] - fn memory_deallocated( - _alloc: &mut Allocation, - _ptr: Pointer, - ) -> EvalResult<'tcx> { - Ok(()) - } - - /// Executed when evaluating the `&` operator: Creating a new reference. - /// This has the chance to adjust the tag. - /// `borrow_kind` can be `None` in case a raw ptr is being created. - fn tag_reference( + /// Add the tag for a newly allocated pointer. + fn tag_new_allocation( ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>, - ptr: Pointer, - pointee_ty: Ty<'tcx>, - pointee_size: Size, - borrow_kind: Option, - ) -> EvalResult<'tcx, Self::PointerTag>; + ptr: Pointer, + kind: MemoryKind, + ) -> EvalResult<'tcx, Pointer>; /// Executed when evaluating the `*` operator: Following a reference. - /// This has the change to adjust the tag. - fn tag_dereference( - ecx: &EvalContext<'a, 'mir, 'tcx, Self>, - ptr: Pointer, - ptr_ty: Ty<'tcx>, - ) -> EvalResult<'tcx, Self::PointerTag>; - - /// Execute a validation operation + /// This has the chance to adjust the tag. It should not change anything else! + /// `mutability` can be `None` in case a raw ptr is being dereferenced. #[inline] - fn validation_op( + fn tag_dereference( + _ecx: &EvalContext<'a, 'mir, 'tcx, Self>, + place: MPlaceTy<'tcx, Self::PointerTag>, + _mutability: Option, + ) -> EvalResult<'tcx, Scalar> { + Ok(place.ptr) + } + + /// Execute a retagging operation + #[inline] + fn retag( _ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>, - _op: ::rustc::mir::ValidationOp, - _operand: &::rustc::mir::ValidationOperand<'tcx, ::rustc::mir::Place<'tcx>>, + _fn_entry: bool, + _place: PlaceTy<'tcx, Self::PointerTag>, ) -> EvalResult<'tcx> { Ok(()) } + + /// Execute an escape-to-raw operation + #[inline] + fn escape_to_raw( + _ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>, + _ptr: OpTy<'tcx, Self::PointerTag>, + ) -> EvalResult<'tcx> { + Ok(()) + } + + /// Called immediately before a new stack frame got pushed + fn stack_push( + ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>, + ) -> EvalResult<'tcx, Self::FrameExtra>; + + /// Called immediately after a stack frame gets popped + fn stack_pop( + ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>, + extra: Self::FrameExtra, + ) -> EvalResult<'tcx>; } diff --git a/src/librustc_mir/interpret/memory.rs b/src/librustc_mir/interpret/memory.rs index 6fe490c6ef..97d7e1586b 100644 --- a/src/librustc_mir/interpret/memory.rs +++ b/src/librustc_mir/interpret/memory.rs @@ -21,16 +21,16 @@ use std::ptr; use std::borrow::Cow; use rustc::ty::{self, Instance, ParamEnv, query::TyCtxtAt}; -use rustc::ty::layout::{self, Align, TargetDataLayout, Size, HasDataLayout}; +use rustc::ty::layout::{Align, TargetDataLayout, Size, HasDataLayout}; pub use rustc::mir::interpret::{truncate, write_target_uint, read_target_uint}; use rustc_data_structures::fx::{FxHashSet, FxHashMap}; use syntax::ast::Mutability; use super::{ - Pointer, AllocId, Allocation, ConstValue, GlobalId, + Pointer, AllocId, Allocation, GlobalId, AllocationExtra, EvalResult, Scalar, EvalErrorKind, AllocType, PointerArithmetic, - Machine, MemoryAccess, AllocMap, MayLeak, ScalarMaybeUndef, + Machine, AllocMap, MayLeak, ErrorHandled, InboundsCheck, }; 
#[derive(Debug, PartialEq, Eq, Copy, Clone, Hash)] @@ -73,20 +73,15 @@ pub struct Memory<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'a, 'mir, 'tcx>> { /// that do not exist any more. dead_alloc_map: FxHashMap, + /// Extra data added by the machine. + pub extra: M::MemoryExtra, + /// Lets us implement `HasDataLayout`, which is awfully convenient. pub(super) tcx: TyCtxtAt<'a, 'tcx, 'tcx>, } -impl<'b, 'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> HasDataLayout - for &'b Memory<'a, 'mir, 'tcx, M> -{ - #[inline] - fn data_layout(&self) -> &TargetDataLayout { - &self.tcx.data_layout - } -} -impl<'a, 'b, 'c, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> HasDataLayout - for &'b &'c mut Memory<'a, 'mir, 'tcx, M> +impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> HasDataLayout + for Memory<'a, 'mir, 'tcx, M> { #[inline] fn data_layout(&self) -> &TargetDataLayout { @@ -96,13 +91,19 @@ impl<'a, 'b, 'c, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> HasDataLayout // FIXME: Really we shouldn't clone memory, ever. Snapshot machinery should instead // carefully copy only the reachable parts. -impl<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'a, 'mir, 'tcx>> - Clone for Memory<'a, 'mir, 'tcx, M> +impl<'a, 'mir, 'tcx, M> + Clone +for + Memory<'a, 'mir, 'tcx, M> +where + M: Machine<'a, 'mir, 'tcx, PointerTag=(), AllocExtra=(), MemoryExtra=()>, + M::MemoryMap: AllocMap, Allocation)>, { fn clone(&self) -> Self { Memory { alloc_map: self.alloc_map.clone(), dead_alloc_map: self.dead_alloc_map.clone(), + extra: (), tcx: self.tcx, } } @@ -111,18 +112,19 @@ impl<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'a, 'mir, 'tcx>> impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { pub fn new(tcx: TyCtxtAt<'a, 'tcx, 'tcx>) -> Self { Memory { - alloc_map: Default::default(), + alloc_map: M::MemoryMap::default(), dead_alloc_map: FxHashMap::default(), + extra: M::MemoryExtra::default(), tcx, } } - pub fn create_fn_alloc(&mut self, instance: Instance<'tcx>) -> Pointer { - Pointer::from(self.tcx.alloc_map.lock().create_fn_alloc(instance)).with_default_tag() + pub fn create_fn_alloc(&mut self, instance: Instance<'tcx>) -> Pointer { + Pointer::from(self.tcx.alloc_map.lock().create_fn_alloc(instance)) } - pub fn allocate_static_bytes(&mut self, bytes: &[u8]) -> Pointer { - Pointer::from(self.tcx.allocate_bytes(bytes)).with_default_tag() + pub fn allocate_static_bytes(&mut self, bytes: &[u8]) -> Pointer { + Pointer::from(self.tcx.allocate_bytes(bytes)) } pub fn allocate_with( @@ -140,9 +142,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { size: Size, align: Align, kind: MemoryKind, - ) -> EvalResult<'tcx, Pointer> { - let ptr = Pointer::from(self.allocate_with(Allocation::undef(size, align), kind)?); - Ok(ptr.with_default_tag()) + ) -> EvalResult<'tcx, Pointer> { + let extra = AllocationExtra::memory_allocated(size, &self.extra); + Ok(Pointer::from(self.allocate_with(Allocation::undef(size, align, extra), kind)?)) } pub fn reallocate( @@ -153,17 +155,18 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { new_size: Size, new_align: Align, kind: MemoryKind, - ) -> EvalResult<'tcx, Pointer> { + ) -> EvalResult<'tcx, Pointer> { if ptr.offset.bytes() != 0 { return err!(ReallocateNonBasePtr); } - // For simplicities' sake, we implement reallocate as "alloc, copy, dealloc" + // For simplicities' sake, we implement reallocate as "alloc, copy, dealloc". + // This happens so rarely, the perf advantage is outweighed by the maintenance cost. 
let new_ptr = self.allocate(new_size, new_align, kind)?; self.copy( ptr.into(), old_align, - new_ptr.into(), + new_ptr.with_default_tag().into(), new_align, old_size.min(new_size), /*nonoverlapping*/ true, @@ -232,7 +235,8 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { } // Let the machine take some extra action - M::memory_deallocated(&mut alloc, ptr)?; + let size = Size::from_bytes(alloc.bytes.len() as u64); + AllocationExtra::memory_deallocated(&mut alloc, ptr, size)?; // Don't forget to remember size and align of this now-dead allocation let old = self.dead_alloc_map.insert( @@ -256,17 +260,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { // Check non-NULL/Undef, extract offset let (offset, alloc_align) = match ptr { Scalar::Ptr(ptr) => { - let (size, align) = self.get_size_and_align(ptr.alloc_id); // check this is not NULL -- which we can ensure only if this is in-bounds // of some (potentially dead) allocation. - if ptr.offset > size { - return err!(PointerOutOfBounds { - ptr: ptr.erase_tag(), - access: true, - allocation_size: size, - }); - }; - // keep data for alignment check + let align = self.check_bounds_ptr_maybe_dead(ptr)?; (ptr.offset.bytes(), align) } Scalar::Bits { bits, size } => { @@ -281,18 +277,18 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { } }; // Check alignment - if alloc_align.abi() < required_align.abi() { + if alloc_align.bytes() < required_align.bytes() { return err!(AlignmentCheckFailed { has: alloc_align, required: required_align, }); } - if offset % required_align.abi() == 0 { + if offset % required_align.bytes() == 0 { Ok(()) } else { - let has = offset % required_align.abi(); + let has = offset % required_align.bytes(); err!(AlignmentCheckFailed { - has: Align::from_bytes(has, has).unwrap(), + has: Align::from_bytes(has).unwrap(), required: required_align, }) } @@ -300,34 +296,19 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { /// Check if the pointer is "in-bounds". Notice that a pointer pointing at the end /// of an allocation (i.e., at the first *inaccessible* location) *is* considered - /// in-bounds! This follows C's/LLVM's rules. The `access` boolean is just used - /// for the error message. - /// If you want to check bounds before doing a memory access, be sure to - /// check the pointer one past the end of your access, then everything will - /// work out exactly. - pub fn check_bounds_ptr(&self, ptr: Pointer, access: bool) -> EvalResult<'tcx> { - let alloc = self.get(ptr.alloc_id)?; - let allocation_size = alloc.bytes.len() as u64; - if ptr.offset.bytes() > allocation_size { - return err!(PointerOutOfBounds { - ptr: ptr.erase_tag(), - access, - allocation_size: Size::from_bytes(allocation_size), - }); - } - Ok(()) - } - - /// Check if the memory range beginning at `ptr` and of size `Size` is "in-bounds". - #[inline(always)] - pub fn check_bounds( + /// in-bounds! This follows C's/LLVM's rules. + /// This function also works for deallocated allocations. + /// Use `.get(ptr.alloc_id)?.check_bounds_ptr(ptr)` if you want to force the allocation + /// to still be live. + /// If you want to check bounds before doing a memory access, better first obtain + /// an `Allocation` and call `check_bounds`. 
+ pub fn check_bounds_ptr_maybe_dead( &self, ptr: Pointer, - size: Size, - access: bool - ) -> EvalResult<'tcx> { - // if ptr.offset is in bounds, then so is ptr (because offset checks for overflow) - self.check_bounds_ptr(ptr.offset(size, &*self)?, access) + ) -> EvalResult<'tcx, Align> { + let (allocation_size, align) = self.get_size_and_align(ptr.alloc_id); + ptr.check_in_alloc(allocation_size, InboundsCheck::MaybeDead)?; + Ok(align) } } @@ -339,15 +320,16 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { /// this machine use the same pointer tag, so it is indirected through /// `M::static_with_default_tag`. fn get_static_alloc( - tcx: TyCtxtAt<'a, 'tcx, 'tcx>, id: AllocId, + tcx: TyCtxtAt<'a, 'tcx, 'tcx>, + memory_extra: &M::MemoryExtra, ) -> EvalResult<'tcx, Cow<'tcx, Allocation>> { let alloc = tcx.alloc_map.lock().get(id); let def_id = match alloc { Some(AllocType::Memory(mem)) => { // We got tcx memory. Let the machine figure out whether and how to // turn that into memory with the right pointer tag. - return Ok(M::static_with_default_tag(mem)) + return Ok(M::adjust_static_allocation(mem, memory_extra)) } Some(AllocType::Function(..)) => { return err!(DerefFunctionPointer) @@ -361,25 +343,27 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { // We got a "lazy" static that has not been computed yet, do some work trace!("static_alloc: Need to compute {:?}", def_id); if tcx.is_foreign_item(def_id) { - return M::find_foreign_static(tcx, def_id); + return M::find_foreign_static(def_id, tcx, memory_extra); } let instance = Instance::mono(tcx.tcx, def_id); let gid = GlobalId { instance, promoted: None, }; - tcx.const_eval(ty::ParamEnv::reveal_all().and(gid)).map_err(|err| { + // use the raw query here to break validation cycles. Later uses of the static will call the + // full query anyway + tcx.const_eval_raw(ty::ParamEnv::reveal_all().and(gid)).map_err(|err| { // no need to report anything, the const_eval call takes care of that for statics assert!(tcx.is_static(def_id).is_some()); - EvalErrorKind::ReferencedConstant(err).into() - }).map(|const_val| { - if let ConstValue::ByRef(_, allocation, _) = const_val.val { - // We got tcx memory. Let the machine figure out whether and how to - // turn that into memory with the right pointer tag. - M::static_with_default_tag(allocation) - } else { - bug!("Matching on non-ByRef static") + match err { + ErrorHandled::Reported => EvalErrorKind::ReferencedConstant.into(), + ErrorHandled::TooGeneric => EvalErrorKind::TooGeneric.into(), } + }).map(|raw_const| { + let allocation = tcx.alloc_map.lock().unwrap_memory(raw_const.alloc_id); + // We got tcx memory. Let the machine figure out whether and how to + // turn that into memory with the right pointer tag. + M::adjust_static_allocation(allocation, memory_extra) }) } @@ -389,7 +373,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { // `get_static_alloc` that we can actually use directly without inserting anything anywhere. // So the error type is `EvalResult<'tcx, &Allocation>`. 
let a = self.alloc_map.get_or(id, || { - let alloc = Self::get_static_alloc(self.tcx, id).map_err(Err)?; + let alloc = Self::get_static_alloc(id, self.tcx, &self.extra).map_err(Err)?; match alloc { Cow::Borrowed(alloc) => { // We got a ref, cheaply return that as an "error" so that the @@ -418,17 +402,18 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { id: AllocId, ) -> EvalResult<'tcx, &mut Allocation> { let tcx = self.tcx; + let memory_extra = &self.extra; let a = self.alloc_map.get_mut_or(id, || { // Need to make a copy, even if `get_static_alloc` is able // to give us a cheap reference. - let alloc = Self::get_static_alloc(tcx, id)?; + let alloc = Self::get_static_alloc(id, tcx, memory_extra)?; if alloc.mutability == Mutability::Immutable { return err!(ModifiedConstantMemory); } - let kind = M::STATIC_KIND.expect( - "An allocation is being mutated but the machine does not expect that to happen" - ); - Ok((MemoryKind::Machine(kind), alloc.into_owned())) + match M::STATIC_KIND { + Some(kind) => Ok((MemoryKind::Machine(kind), alloc.into_owned())), + None => err!(ModifiedStatic), + } }); // Unpack the error type manually because type inference doesn't // work otherwise (and we cannot help it because `impl Trait`) @@ -450,14 +435,14 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { } // Could also be a fn ptr or extern static match self.tcx.alloc_map.lock().get(id) { - Some(AllocType::Function(..)) => (Size::ZERO, Align::from_bytes(1, 1).unwrap()), + Some(AllocType::Function(..)) => (Size::ZERO, Align::from_bytes(1).unwrap()), Some(AllocType::Static(did)) => { // The only way `get` couldn't have worked here is if this is an extern static assert!(self.tcx.is_foreign_item(did)); // Use size and align of the type let ty = self.tcx.type_of(did); let layout = self.tcx.layout_of(ParamEnv::empty().and(ty)).unwrap(); - (layout.size, layout.align) + (layout.size, layout.align.abi) } _ => { // Must be a deallocated pointer @@ -522,7 +507,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { "{}({} bytes, alignment {}){}", msg, alloc.bytes.len(), - alloc.align.abi(), + alloc.align.bytes(), extra ); @@ -610,94 +595,27 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { } } -/// Byte accessors +/// Byte Accessors impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { - /// The last argument controls whether we error out when there are undefined - /// or pointer bytes. You should never call this, call `get_bytes` or - /// `get_bytes_with_undef_and_ptr` instead, - /// - /// This function also guarantees that the resulting pointer will remain stable - /// even when new allocations are pushed to the `HashMap`. `copy_repeatedly` relies - /// on that. 
- fn get_bytes_internal( + pub fn read_bytes( &self, - ptr: Pointer, + ptr: Scalar, size: Size, - align: Align, - check_defined_and_ptr: bool, ) -> EvalResult<'tcx, &[u8]> { - assert_ne!(size.bytes(), 0, "0-sized accesses should never even get a `Pointer`"); - self.check_align(ptr.into(), align)?; - self.check_bounds(ptr, size, true)?; - - if check_defined_and_ptr { - self.check_defined(ptr, size)?; - self.check_relocations(ptr, size)?; + if size.bytes() == 0 { + Ok(&[]) } else { - // We still don't want relocations on the *edges* - self.check_relocation_edges(ptr, size)?; + let ptr = ptr.to_ptr()?; + self.get(ptr.alloc_id)?.get_bytes(self, ptr, size) } - - let alloc = self.get(ptr.alloc_id)?; - M::memory_accessed(alloc, ptr, size, MemoryAccess::Read)?; - - assert_eq!(ptr.offset.bytes() as usize as u64, ptr.offset.bytes()); - assert_eq!(size.bytes() as usize as u64, size.bytes()); - let offset = ptr.offset.bytes() as usize; - Ok(&alloc.bytes[offset..offset + size.bytes() as usize]) - } - - #[inline] - fn get_bytes( - &self, - ptr: Pointer, - size: Size, - align: Align - ) -> EvalResult<'tcx, &[u8]> { - self.get_bytes_internal(ptr, size, align, true) - } - - /// It is the caller's responsibility to handle undefined and pointer bytes. - /// However, this still checks that there are no relocations on the *edges*. - #[inline] - fn get_bytes_with_undef_and_ptr( - &self, - ptr: Pointer, - size: Size, - align: Align - ) -> EvalResult<'tcx, &[u8]> { - self.get_bytes_internal(ptr, size, align, false) - } - - /// Just calling this already marks everything as defined and removes relocations, - /// so be sure to actually put data there! - fn get_bytes_mut( - &mut self, - ptr: Pointer, - size: Size, - align: Align, - ) -> EvalResult<'tcx, &mut [u8]> { - assert_ne!(size.bytes(), 0, "0-sized accesses should never even get a `Pointer`"); - self.check_align(ptr.into(), align)?; - self.check_bounds(ptr, size, true)?; - - self.mark_definedness(ptr, size, true)?; - self.clear_relocations(ptr, size)?; - - let alloc = self.get_mut(ptr.alloc_id)?; - M::memory_accessed(alloc, ptr, size, MemoryAccess::Write)?; - - assert_eq!(ptr.offset.bytes() as usize as u64, ptr.offset.bytes()); - assert_eq!(size.bytes() as usize as u64, size.bytes()); - let offset = ptr.offset.bytes() as usize; - Ok(&mut alloc.bytes[offset..offset + size.bytes() as usize]) } } /// Interning (for CTFE) impl<'a, 'mir, 'tcx, M> Memory<'a, 'mir, 'tcx, M> where - M: Machine<'a, 'mir, 'tcx, PointerTag=(), AllocExtra=()>, + M: Machine<'a, 'mir, 'tcx, PointerTag=(), AllocExtra=(), MemoryExtra=()>, + // FIXME: Working around https://github.com/rust-lang/rust/issues/24159 M::MemoryMap: AllocMap, Allocation)>, { /// mark an allocation as static and initialized, either mutable or not @@ -732,6 +650,11 @@ where if self.alloc_map.contains_key(&alloc) { // Not yet interned, so proceed recursively self.intern_static(alloc, mutability)?; + } else if self.dead_alloc_map.contains_key(&alloc) { + // dangling pointer + return err!(ValidationFailure( + "encountered dangling pointer in final constant".into(), + )) } } Ok(()) @@ -762,10 +685,11 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { length: u64, nonoverlapping: bool, ) -> EvalResult<'tcx> { + self.check_align(src, src_align)?; + self.check_align(dest, dest_align)?; if size.bytes() == 0 { - // Nothing to do for ZST, other than checking alignment and non-NULLness. 
- self.check_align(src, src_align)?; - self.check_align(dest, dest_align)?; + // Nothing to do for ZST, other than checking alignment and + // non-NULLness which already happened. return Ok(()); } let src = src.to_ptr()?; @@ -777,7 +701,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { // (`get_bytes_with_undef_and_ptr` below checks that there are no // relocations overlapping the edges; those would not be handled correctly). let relocations = { - let relocations = self.relocations(src, size)?; + let relocations = self.get(src.alloc_id)?.relocations(self, src, size); let mut new_relocations = Vec::with_capacity(relocations.len() * (length as usize)); for i in 0..length { new_relocations.extend( @@ -793,9 +717,15 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { new_relocations }; - // This also checks alignment, and relocation edges on the src. - let src_bytes = self.get_bytes_with_undef_and_ptr(src, size, src_align)?.as_ptr(); - let dest_bytes = self.get_bytes_mut(dest, size * length, dest_align)?.as_mut_ptr(); + let tcx = self.tcx.tcx; + + // This checks relocation edges on the src. + let src_bytes = self.get(src.alloc_id)? + .get_bytes_with_undef_and_ptr(&tcx, src, size)? + .as_ptr(); + let dest_bytes = self.get_mut(dest.alloc_id)? + .get_bytes_mut(&tcx, dest, size * length)? + .as_mut_ptr(); // SAFE: The above indexing would have panicked if there weren't at least `size` bytes // behind `src` and `dest`. Also, we use the overlapping-safe `ptr::copy` if `src` and @@ -836,276 +766,6 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { Ok(()) } - - pub fn read_c_str(&self, ptr: Pointer) -> EvalResult<'tcx, &[u8]> { - let alloc = self.get(ptr.alloc_id)?; - assert_eq!(ptr.offset.bytes() as usize as u64, ptr.offset.bytes()); - let offset = ptr.offset.bytes() as usize; - match alloc.bytes[offset..].iter().position(|&c| c == 0) { - Some(size) => { - let p1 = Size::from_bytes((size + 1) as u64); - self.check_relocations(ptr, p1)?; - self.check_defined(ptr, p1)?; - Ok(&alloc.bytes[offset..offset + size]) - } - None => err!(UnterminatedCString(ptr.erase_tag())), - } - } - - pub fn check_bytes( - &self, - ptr: Scalar, - size: Size, - allow_ptr_and_undef: bool, - ) -> EvalResult<'tcx> { - // Empty accesses don't need to be valid pointers, but they should still be non-NULL - let align = Align::from_bytes(1, 1).unwrap(); - if size.bytes() == 0 { - self.check_align(ptr, align)?; - return Ok(()); - } - let ptr = ptr.to_ptr()?; - // Check bounds, align and relocations on the edges - self.get_bytes_with_undef_and_ptr(ptr, size, align)?; - // Check undef and ptr - if !allow_ptr_and_undef { - self.check_defined(ptr, size)?; - self.check_relocations(ptr, size)?; - } - Ok(()) - } - - pub fn read_bytes(&self, ptr: Scalar, size: Size) -> EvalResult<'tcx, &[u8]> { - // Empty accesses don't need to be valid pointers, but they should still be non-NULL - let align = Align::from_bytes(1, 1).unwrap(); - if size.bytes() == 0 { - self.check_align(ptr, align)?; - return Ok(&[]); - } - self.get_bytes(ptr.to_ptr()?, size, align) - } - - pub fn write_bytes(&mut self, ptr: Scalar, src: &[u8]) -> EvalResult<'tcx> { - // Empty accesses don't need to be valid pointers, but they should still be non-NULL - let align = Align::from_bytes(1, 1).unwrap(); - if src.is_empty() { - self.check_align(ptr, align)?; - return Ok(()); - } - let bytes = self.get_bytes_mut(ptr.to_ptr()?, Size::from_bytes(src.len() as u64), align)?; - 
bytes.clone_from_slice(src); - Ok(()) - } - - pub fn write_repeat( - &mut self, - ptr: Scalar, - val: u8, - count: Size - ) -> EvalResult<'tcx> { - // Empty accesses don't need to be valid pointers, but they should still be non-NULL - let align = Align::from_bytes(1, 1).unwrap(); - if count.bytes() == 0 { - self.check_align(ptr, align)?; - return Ok(()); - } - let bytes = self.get_bytes_mut(ptr.to_ptr()?, count, align)?; - for b in bytes { - *b = val; - } - Ok(()) - } - - /// Read a *non-ZST* scalar - pub fn read_scalar( - &self, - ptr: Pointer, - ptr_align: Align, - size: Size - ) -> EvalResult<'tcx, ScalarMaybeUndef> { - // get_bytes_unchecked tests alignment and relocation edges - let bytes = self.get_bytes_with_undef_and_ptr( - ptr, size, ptr_align.min(self.int_align(size)) - )?; - // Undef check happens *after* we established that the alignment is correct. - // We must not return Ok() for unaligned pointers! - if self.check_defined(ptr, size).is_err() { - // this inflates undefined bytes to the entire scalar, even if only a few - // bytes are undefined - return Ok(ScalarMaybeUndef::Undef); - } - // Now we do the actual reading - let bits = read_target_uint(self.tcx.data_layout.endian, bytes).unwrap(); - // See if we got a pointer - if size != self.pointer_size() { - // *Now* better make sure that the inside also is free of relocations. - self.check_relocations(ptr, size)?; - } else { - let alloc = self.get(ptr.alloc_id)?; - match alloc.relocations.get(&ptr.offset) { - Some(&(tag, alloc_id)) => { - let ptr = Pointer::new_with_tag(alloc_id, Size::from_bytes(bits as u64), tag); - return Ok(ScalarMaybeUndef::Scalar(ptr.into())) - } - None => {}, - } - } - // We don't. Just return the bits. - Ok(ScalarMaybeUndef::Scalar(Scalar::from_uint(bits, size))) - } - - pub fn read_ptr_sized( - &self, - ptr: Pointer, - ptr_align: Align - ) -> EvalResult<'tcx, ScalarMaybeUndef> { - self.read_scalar(ptr, ptr_align, self.pointer_size()) - } - - /// Write a *non-ZST* scalar - pub fn write_scalar( - &mut self, - ptr: Pointer, - ptr_align: Align, - val: ScalarMaybeUndef, - type_size: Size, - ) -> EvalResult<'tcx> { - let val = match val { - ScalarMaybeUndef::Scalar(scalar) => scalar, - ScalarMaybeUndef::Undef => return self.mark_definedness(ptr, type_size, false), - }; - - let bytes = match val { - Scalar::Ptr(val) => { - assert_eq!(type_size, self.pointer_size()); - val.offset.bytes() as u128 - } - - Scalar::Bits { bits, size } => { - assert_eq!(size as u64, type_size.bytes()); - debug_assert_eq!(truncate(bits, Size::from_bytes(size.into())), bits, - "Unexpected value of size {} when writing to memory", size); - bits - }, - }; - - { - // get_bytes_mut checks alignment - let endian = self.tcx.data_layout.endian; - let dst = self.get_bytes_mut(ptr, type_size, ptr_align)?; - write_target_uint(endian, dst, bytes).unwrap(); - } - - // See if we have to also write a relocation - match val { - Scalar::Ptr(val) => { - self.get_mut(ptr.alloc_id)?.relocations.insert( - ptr.offset, - (val.tag, val.alloc_id), - ); - } - _ => {} - } - - Ok(()) - } - - pub fn write_ptr_sized( - &mut self, - ptr: Pointer, - ptr_align: Align, - val: ScalarMaybeUndef - ) -> EvalResult<'tcx> { - let ptr_size = self.pointer_size(); - self.write_scalar(ptr.into(), ptr_align, val, ptr_size) - } - - fn int_align(&self, size: Size) -> Align { - // We assume pointer-sized integers have the same alignment as pointers. - // We also assume signed and unsigned integers of the same size have the same alignment. 
- let ity = match size.bytes() { - 1 => layout::I8, - 2 => layout::I16, - 4 => layout::I32, - 8 => layout::I64, - 16 => layout::I128, - _ => bug!("bad integer size: {}", size.bytes()), - }; - ity.align(self) - } -} - -/// Relocations -impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { - /// Return all relocations overlapping with the given ptr-offset pair. - fn relocations( - &self, - ptr: Pointer, - size: Size, - ) -> EvalResult<'tcx, &[(Size, (M::PointerTag, AllocId))]> { - // We have to go back `pointer_size - 1` bytes, as that one would still overlap with - // the beginning of this range. - let start = ptr.offset.bytes().saturating_sub(self.pointer_size().bytes() - 1); - let end = ptr.offset + size; // this does overflow checking - Ok(self.get(ptr.alloc_id)?.relocations.range(Size::from_bytes(start)..end)) - } - - /// Check that there ar eno relocations overlapping with the given range. - #[inline(always)] - fn check_relocations(&self, ptr: Pointer, size: Size) -> EvalResult<'tcx> { - if self.relocations(ptr, size)?.len() != 0 { - err!(ReadPointerAsBytes) - } else { - Ok(()) - } - } - - /// Remove all relocations inside the given range. - /// If there are relocations overlapping with the edges, they - /// are removed as well *and* the bytes they cover are marked as - /// uninitialized. This is a somewhat odd "spooky action at a distance", - /// but it allows strictly more code to run than if we would just error - /// immediately in that case. - fn clear_relocations(&mut self, ptr: Pointer, size: Size) -> EvalResult<'tcx> { - // Find the start and end of the given range and its outermost relocations. - let (first, last) = { - // Find all relocations overlapping the given range. - let relocations = self.relocations(ptr, size)?; - if relocations.is_empty() { - return Ok(()); - } - - (relocations.first().unwrap().0, - relocations.last().unwrap().0 + self.pointer_size()) - }; - let start = ptr.offset; - let end = start + size; - - let alloc = self.get_mut(ptr.alloc_id)?; - - // Mark parts of the outermost relocations as undefined if they partially fall outside the - // given range. - if first < start { - alloc.undef_mask.set_range(first, start, false); - } - if last > end { - alloc.undef_mask.set_range(end, last, false); - } - - // Forget all the relocations. - alloc.relocations.remove_range(first..last); - - Ok(()) - } - - /// Error if there are relocations overlapping with the edges of the - /// given memory range. - #[inline] - fn check_relocation_edges(&self, ptr: Pointer, size: Size) -> EvalResult<'tcx> { - self.check_relocations(ptr, Size::ZERO)?; - self.check_relocations(ptr.offset(size, self)?, Size::ZERO)?; - Ok(()) - } } /// Undefined bytes @@ -1137,33 +797,4 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { Ok(()) } - - /// Checks that a range of bytes is defined. If not, returns the `ReadUndefBytes` - /// error which will report the first byte which is undefined. 
- #[inline] - fn check_defined(&self, ptr: Pointer, size: Size) -> EvalResult<'tcx> { - let alloc = self.get(ptr.alloc_id)?; - alloc.undef_mask.is_range_defined( - ptr.offset, - ptr.offset + size, - ).or_else(|idx| err!(ReadUndefBytes(idx))) - } - - pub fn mark_definedness( - &mut self, - ptr: Pointer, - size: Size, - new_state: bool, - ) -> EvalResult<'tcx> { - if size.bytes() == 0 { - return Ok(()); - } - let alloc = self.get_mut(ptr.alloc_id)?; - alloc.undef_mask.set_range( - ptr.offset, - ptr.offset + size, - new_state, - ); - Ok(()) - } } diff --git a/src/librustc_mir/interpret/mod.rs b/src/librustc_mir/interpret/mod.rs index 55037a99e0..96ea0d5094 100644 --- a/src/librustc_mir/interpret/mod.rs +++ b/src/librustc_mir/interpret/mod.rs @@ -23,6 +23,7 @@ mod terminator; mod traits; mod validity; mod intrinsics; +mod visitor; pub use rustc::mir::interpret::*; // have all the `interpret` symbols in one place: here @@ -34,8 +35,10 @@ pub use self::place::{Place, PlaceTy, MemPlace, MPlaceTy}; pub use self::memory::{Memory, MemoryKind}; -pub use self::machine::{Machine, AllocMap, MemoryAccess, MayLeak}; +pub use self::machine::{Machine, AllocMap, MayLeak}; -pub use self::operand::{ScalarMaybeUndef, Value, ValTy, Operand, OpTy}; +pub use self::operand::{ScalarMaybeUndef, Immediate, ImmTy, Operand, OpTy}; + +pub use self::visitor::{ValueVisitor, MutValueVisitor}; pub use self::validity::RefTracking; diff --git a/src/librustc_mir/interpret/operand.rs b/src/librustc_mir/interpret/operand.rs index 021e2d58f8..83ceadada6 100644 --- a/src/librustc_mir/interpret/operand.rs +++ b/src/librustc_mir/interpret/operand.rs @@ -13,130 +13,16 @@ use std::convert::TryInto; -use rustc::{mir, ty}; -use rustc::ty::layout::{self, Size, LayoutOf, TyLayout, HasDataLayout, IntegerExt}; +use rustc::mir; +use rustc::ty::layout::{self, Size, LayoutOf, TyLayout, HasDataLayout, IntegerExt, VariantIdx}; use rustc::mir::interpret::{ GlobalId, AllocId, ConstValue, Pointer, Scalar, - EvalResult, EvalErrorKind + EvalResult, EvalErrorKind, }; use super::{EvalContext, Machine, MemPlace, MPlaceTy, MemoryKind}; - -#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, RustcEncodable, RustcDecodable, Hash)] -pub enum ScalarMaybeUndef { - Scalar(Scalar), - Undef, -} - -impl From> for ScalarMaybeUndef { - #[inline(always)] - fn from(s: Scalar) -> Self { - ScalarMaybeUndef::Scalar(s) - } -} - -impl<'tcx> ScalarMaybeUndef<()> { - #[inline] - pub fn with_default_tag(self) -> ScalarMaybeUndef - where Tag: Default - { - match self { - ScalarMaybeUndef::Scalar(s) => ScalarMaybeUndef::Scalar(s.with_default_tag()), - ScalarMaybeUndef::Undef => ScalarMaybeUndef::Undef, - } - } -} - -impl<'tcx, Tag> ScalarMaybeUndef { - #[inline] - pub fn erase_tag(self) -> ScalarMaybeUndef - { - match self { - ScalarMaybeUndef::Scalar(s) => ScalarMaybeUndef::Scalar(s.erase_tag()), - ScalarMaybeUndef::Undef => ScalarMaybeUndef::Undef, - } - } - - #[inline] - pub fn not_undef(self) -> EvalResult<'static, Scalar> { - match self { - ScalarMaybeUndef::Scalar(scalar) => Ok(scalar), - ScalarMaybeUndef::Undef => err!(ReadUndefBytes(Size::from_bytes(0))), - } - } - - #[inline(always)] - pub fn to_ptr(self) -> EvalResult<'tcx, Pointer> { - self.not_undef()?.to_ptr() - } - - #[inline(always)] - pub fn to_bits(self, target_size: Size) -> EvalResult<'tcx, u128> { - self.not_undef()?.to_bits(target_size) - } - - #[inline(always)] - pub fn to_bool(self) -> EvalResult<'tcx, bool> { - self.not_undef()?.to_bool() - } - - #[inline(always)] - pub fn to_char(self) -> 
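// Illustrative sketch with assumed, simplified types: the "undef mask" tracks, per byte
// of an allocation, whether that byte has ever been initialized. `mark_definedness`
// above flips a whole range, and `check_defined` reports the first undefined byte,
// which is what the `ReadUndefBytes` error carries.

struct UndefMask {
    defined: Vec<bool>, // one flag per byte of the allocation (the real mask is a bitset)
}

impl UndefMask {
    fn new(len: usize) -> Self {
        // Freshly allocated memory starts out entirely undefined.
        UndefMask { defined: vec![false; len] }
    }

    /// Mark `start..end` as defined (`true`) or undefined (`false`).
    fn set_range(&mut self, start: usize, end: usize, new_state: bool) {
        for flag in &mut self.defined[start..end] {
            *flag = new_state;
        }
    }

    /// Ok if every byte in `start..end` is defined; otherwise the index of the
    /// first undefined byte.
    fn is_range_defined(&self, start: usize, end: usize) -> Result<(), usize> {
        match self.defined[start..end].iter().position(|&d| !d) {
            Some(idx) => Err(start + idx),
            None => Ok(()),
        }
    }
}

fn main() {
    let mut mask = UndefMask::new(8);
    mask.set_range(0, 4, true);
    assert_eq!(mask.is_range_defined(0, 4), Ok(()));
    assert_eq!(mask.is_range_defined(2, 6), Err(4)); // byte 4 was never written
}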
EvalResult<'tcx, char> { - self.not_undef()?.to_char() - } - - #[inline(always)] - pub fn to_f32(self) -> EvalResult<'tcx, f32> { - self.not_undef()?.to_f32() - } - - #[inline(always)] - pub fn to_f64(self) -> EvalResult<'tcx, f64> { - self.not_undef()?.to_f64() - } - - #[inline(always)] - pub fn to_u8(self) -> EvalResult<'tcx, u8> { - self.not_undef()?.to_u8() - } - - #[inline(always)] - pub fn to_u32(self) -> EvalResult<'tcx, u32> { - self.not_undef()?.to_u32() - } - - #[inline(always)] - pub fn to_u64(self) -> EvalResult<'tcx, u64> { - self.not_undef()?.to_u64() - } - - #[inline(always)] - pub fn to_usize(self, cx: impl HasDataLayout) -> EvalResult<'tcx, u64> { - self.not_undef()?.to_usize(cx) - } - - #[inline(always)] - pub fn to_i8(self) -> EvalResult<'tcx, i8> { - self.not_undef()?.to_i8() - } - - #[inline(always)] - pub fn to_i32(self) -> EvalResult<'tcx, i32> { - self.not_undef()?.to_i32() - } - - #[inline(always)] - pub fn to_i64(self) -> EvalResult<'tcx, i64> { - self.not_undef()?.to_i64() - } - - #[inline(always)] - pub fn to_isize(self, cx: impl HasDataLayout) -> EvalResult<'tcx, i64> { - self.not_undef()?.to_isize(cx) - } -} - +pub use rustc::mir::interpret::ScalarMaybeUndef; /// A `Value` represents a single immediate self-contained Rust value. /// @@ -144,54 +30,57 @@ impl<'tcx, Tag> ScalarMaybeUndef { /// primitive values (`ScalarPair`). It allows Miri to avoid making allocations for checked binary /// operations and fat pointers. This idea was taken from rustc's codegen. /// In particular, thanks to `ScalarPair`, arithmetic operations and casts can be entirely -/// defined on `Value`, and do not have to work with a `Place`. +/// defined on `Immediate`, and do not have to work with a `Place`. #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)] -pub enum Value { +pub enum Immediate { Scalar(ScalarMaybeUndef), ScalarPair(ScalarMaybeUndef, ScalarMaybeUndef), } -impl Value { +impl Immediate { #[inline] - pub fn with_default_tag(self) -> Value + pub fn with_default_tag(self) -> Immediate where Tag: Default { match self { - Value::Scalar(x) => Value::Scalar(x.with_default_tag()), - Value::ScalarPair(x, y) => - Value::ScalarPair(x.with_default_tag(), y.with_default_tag()), + Immediate::Scalar(x) => Immediate::Scalar(x.with_default_tag()), + Immediate::ScalarPair(x, y) => + Immediate::ScalarPair(x.with_default_tag(), y.with_default_tag()), } } } -impl<'tcx, Tag> Value { +impl<'tcx, Tag> Immediate { #[inline] - pub fn erase_tag(self) -> Value + pub fn erase_tag(self) -> Immediate { match self { - Value::Scalar(x) => Value::Scalar(x.erase_tag()), - Value::ScalarPair(x, y) => - Value::ScalarPair(x.erase_tag(), y.erase_tag()), + Immediate::Scalar(x) => Immediate::Scalar(x.erase_tag()), + Immediate::ScalarPair(x, y) => + Immediate::ScalarPair(x.erase_tag(), y.erase_tag()), } } pub fn new_slice( val: Scalar, len: u64, - cx: impl HasDataLayout + cx: &impl HasDataLayout ) -> Self { - Value::ScalarPair(val.into(), Scalar::from_uint(len, cx.data_layout().pointer_size).into()) + Immediate::ScalarPair( + val.into(), + Scalar::from_uint(len, cx.data_layout().pointer_size).into(), + ) } pub fn new_dyn_trait(val: Scalar, vtable: Pointer) -> Self { - Value::ScalarPair(val.into(), Scalar::Ptr(vtable).into()) + Immediate::ScalarPair(val.into(), Scalar::Ptr(vtable).into()) } #[inline] pub fn to_scalar_or_undef(self) -> ScalarMaybeUndef { match self { - Value::Scalar(val) => val, - Value::ScalarPair(..) 
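// Illustrative sketch (assumed, much-simplified types): every `to_*` helper on
// `ScalarMaybeUndef` funnels through `not_undef`, turning "these bytes were never
// written" into a hard error before any further conversion is attempted.

use std::convert::TryFrom;

#[derive(Copy, Clone, Debug, PartialEq)]
enum MaybeUndef<T> {
    Scalar(T),
    Undef,
}

impl<T> MaybeUndef<T> {
    /// Reject undefined data; everything else builds on this.
    fn not_undef(self) -> Result<T, &'static str> {
        match self {
            MaybeUndef::Scalar(v) => Ok(v),
            MaybeUndef::Undef => Err("read of undefined bytes"),
        }
    }
}

impl MaybeUndef<u128> {
    // The real type has one such wrapper per primitive; two are enough to show the shape.
    fn to_bool(self) -> Result<bool, &'static str> {
        match self.not_undef()? {
            0 => Ok(false),
            1 => Ok(true),
            _ => Err("not a boolean"),
        }
    }

    fn to_u32(self) -> Result<u32, &'static str> {
        u32::try_from(self.not_undef()?).map_err(|_| "value does not fit in u32")
    }
}

fn main() {
    assert_eq!(MaybeUndef::Scalar(1u128).to_bool(), Ok(true));
    assert!(MaybeUndef::<u128>::Undef.to_u32().is_err());
}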
=> bug!("Got a fat pointer where a scalar was expected"), + Immediate::Scalar(val) => val, + Immediate::ScalarPair(..) => bug!("Got a fat pointer where a scalar was expected"), } } @@ -203,35 +92,45 @@ impl<'tcx, Tag> Value { #[inline] pub fn to_scalar_pair(self) -> EvalResult<'tcx, (Scalar, Scalar)> { match self { - Value::Scalar(..) => bug!("Got a thin pointer where a scalar pair was expected"), - Value::ScalarPair(a, b) => Ok((a.not_undef()?, b.not_undef()?)) + Immediate::Scalar(..) => bug!("Got a thin pointer where a scalar pair was expected"), + Immediate::ScalarPair(a, b) => Ok((a.not_undef()?, b.not_undef()?)) } } - /// Convert the value into a pointer (or a pointer-sized integer). + /// Convert the immediate into a pointer (or a pointer-sized integer). /// Throws away the second half of a ScalarPair! #[inline] pub fn to_scalar_ptr(self) -> EvalResult<'tcx, Scalar> { match self { - Value::Scalar(ptr) | - Value::ScalarPair(ptr, _) => ptr.not_undef(), + Immediate::Scalar(ptr) | + Immediate::ScalarPair(ptr, _) => ptr.not_undef(), } } + + /// Convert the value into its metadata. + /// Throws away the first half of a ScalarPair! + #[inline] + pub fn to_meta(self) -> EvalResult<'tcx, Option>> { + Ok(match self { + Immediate::Scalar(_) => None, + Immediate::ScalarPair(_, meta) => Some(meta.not_undef()?), + }) + } } -// ScalarPair needs a type to interpret, so we often have a value and a type together +// ScalarPair needs a type to interpret, so we often have an immediate and a type together // as input for binary and cast operations. #[derive(Copy, Clone, Debug)] -pub struct ValTy<'tcx, Tag=()> { - value: Value, +pub struct ImmTy<'tcx, Tag=()> { + immediate: Immediate, pub layout: TyLayout<'tcx>, } -impl<'tcx, Tag> ::std::ops::Deref for ValTy<'tcx, Tag> { - type Target = Value; +impl<'tcx, Tag> ::std::ops::Deref for ImmTy<'tcx, Tag> { + type Target = Immediate; #[inline(always)] - fn deref(&self) -> &Value { - &self.value + fn deref(&self) -> &Immediate { + &self.immediate } } @@ -240,7 +139,7 @@ impl<'tcx, Tag> ::std::ops::Deref for ValTy<'tcx, Tag> { /// memory and to avoid having to store arbitrary-sized data here. #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)] pub enum Operand { - Immediate(Value), + Immediate(Immediate), Indirect(MemPlace), } @@ -278,11 +177,11 @@ impl Operand { } #[inline] - pub fn to_immediate(self) -> Value + pub fn to_immediate(self) -> Immediate where Tag: ::std::fmt::Debug { match self { - Operand::Immediate(val) => val, + Operand::Immediate(imm) => imm, _ => bug!("to_immediate: expected Operand::Immediate, got {:?}", self), } @@ -313,11 +212,11 @@ impl<'tcx, Tag: Copy> From> for OpTy<'tcx, Tag> { } } -impl<'tcx, Tag> From> for OpTy<'tcx, Tag> { +impl<'tcx, Tag> From> for OpTy<'tcx, Tag> { #[inline(always)] - fn from(val: ValTy<'tcx, Tag>) -> Self { + fn from(val: ImmTy<'tcx, Tag>) -> Self { OpTy { - op: Operand::Immediate(val.value), + op: Operand::Immediate(val.immediate), layout: val.layout } } @@ -357,12 +256,12 @@ fn from_known_layout<'tcx>( } impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { - /// Try reading a value in memory; this is interesting particularly for ScalarPair. + /// Try reading an immediate in memory; this is interesting particularly for ScalarPair. /// Return None if the layout does not permit loading this as a value. 
- pub(super) fn try_read_value_from_mplace( + pub(super) fn try_read_immediate_from_mplace( &self, mplace: MPlaceTy<'tcx, M::PointerTag>, - ) -> EvalResult<'tcx, Option>> { + ) -> EvalResult<'tcx, Option>> { if mplace.layout.is_unsized() { // Don't touch unsized return Ok(None); @@ -373,43 +272,53 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> // Not all ZSTs have a layout we would handle below, so just short-circuit them // all here. self.memory.check_align(ptr, ptr_align)?; - return Ok(Some(Value::Scalar(Scalar::zst().into()))); + return Ok(Some(Immediate::Scalar(Scalar::zst().into()))); } + // check for integer pointers before alignment to report better errors let ptr = ptr.to_ptr()?; + self.memory.check_align(ptr.into(), ptr_align)?; match mplace.layout.abi { layout::Abi::Scalar(..) => { - let scalar = self.memory.read_scalar(ptr, ptr_align, mplace.layout.size)?; - Ok(Some(Value::Scalar(scalar))) + let scalar = self.memory + .get(ptr.alloc_id)? + .read_scalar(self, ptr, mplace.layout.size)?; + Ok(Some(Immediate::Scalar(scalar))) } layout::Abi::ScalarPair(ref a, ref b) => { let (a, b) = (&a.value, &b.value); let (a_size, b_size) = (a.size(self), b.size(self)); let a_ptr = ptr; - let b_offset = a_size.abi_align(b.align(self)); + let b_offset = a_size.align_to(b.align(self).abi); assert!(b_offset.bytes() > 0); // we later use the offset to test which field to use - let b_ptr = ptr.offset(b_offset, self)?.into(); - let a_val = self.memory.read_scalar(a_ptr, ptr_align, a_size)?; - let b_val = self.memory.read_scalar(b_ptr, ptr_align, b_size)?; - Ok(Some(Value::ScalarPair(a_val, b_val))) + let b_ptr = ptr.offset(b_offset, self)?; + let a_val = self.memory + .get(ptr.alloc_id)? + .read_scalar(self, a_ptr, a_size)?; + let b_align = ptr_align.restrict_for_offset(b_offset); + self.memory.check_align(b_ptr.into(), b_align)?; + let b_val = self.memory + .get(ptr.alloc_id)? + .read_scalar(self, b_ptr, b_size)?; + Ok(Some(Immediate::ScalarPair(a_val, b_val))) } _ => Ok(None), } } - /// Try returning an immediate value for the operand. - /// If the layout does not permit loading this as a value, return where in memory + /// Try returning an immediate for the operand. + /// If the layout does not permit loading this as an immediate, return where in memory /// we can find the data. /// Note that for a given layout, this operation will either always fail or always /// succeed! Whether it succeeds depends on whether the layout can be represented - /// in a `Value`, not on which data is stored there currently. - pub(crate) fn try_read_value( + /// in a `Immediate`, not on which data is stored there currently. + pub(crate) fn try_read_immediate( &self, src: OpTy<'tcx, M::PointerTag>, - ) -> EvalResult<'tcx, Result, MemPlace>> { + ) -> EvalResult<'tcx, Result, MemPlace>> { Ok(match src.try_as_mplace() { Ok(mplace) => { - if let Some(val) = self.try_read_value_from_mplace(mplace)? { + if let Some(val) = self.try_read_immediate_from_mplace(mplace)? { Ok(val) } else { Err(*mplace) @@ -419,14 +328,14 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> }) } - /// Read a value from a place, asserting that that is possible with the given layout. + /// Read an immediate from a place, asserting that that is possible with the given layout. #[inline(always)] - pub fn read_value( + pub fn read_immediate( &self, op: OpTy<'tcx, M::PointerTag> - ) -> EvalResult<'tcx, ValTy<'tcx, M::PointerTag>> { - if let Ok(value) = self.try_read_value(op)? 
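// Illustrative sketch (assumed helper name): for a `ScalarPair`, the second component
// starts at the first component's size rounded up to the second component's alignment,
// which is what `a_size.align_to(b.align(self).abi)` computes above.

/// Round `size` up to the next multiple of `align` (which must be a power of two).
fn align_to(size: u64, align: u64) -> u64 {
    assert!(align.is_power_of_two());
    (size + align - 1) & !(align - 1)
}

fn main() {
    // A pair of (u8, u32): the u32 half starts at offset 4, not 1.
    assert_eq!(align_to(1, 4), 4);
    // Already-aligned sizes are unchanged: (u64, u64) puts the second half at 8.
    assert_eq!(align_to(8, 8), 8);
}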
{ - Ok(ValTy { value, layout: op.layout }) + ) -> EvalResult<'tcx, ImmTy<'tcx, M::PointerTag>> { + if let Ok(immediate) = self.try_read_immediate(op)? { + Ok(ImmTy { immediate, layout: op.layout }) } else { bug!("primitive read failed for type: {:?}", op.layout.ty); } @@ -437,10 +346,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> &self, op: OpTy<'tcx, M::PointerTag> ) -> EvalResult<'tcx, ScalarMaybeUndef> { - match *self.read_value(op)? { - Value::ScalarPair(..) => bug!("got ScalarPair for type: {:?}", op.layout.ty), - Value::Scalar(val) => Ok(val), - } + Ok(self.read_immediate(op)?.to_scalar_or_undef()) } // Turn the MPlace into a string (must already be dereferenced!) @@ -460,16 +366,16 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> layout: TyLayout<'tcx> ) -> EvalResult<'tcx, Operand> { // This decides which types we will use the Immediate optimization for, and hence should - // match what `try_read_value` and `eval_place_to_op` support. + // match what `try_read_immediate` and `eval_place_to_op` support. if layout.is_zst() { - return Ok(Operand::Immediate(Value::Scalar(Scalar::zst().into()))); + return Ok(Operand::Immediate(Immediate::Scalar(Scalar::zst().into()))); } Ok(match layout.abi { layout::Abi::Scalar(..) => - Operand::Immediate(Value::Scalar(ScalarMaybeUndef::Undef)), + Operand::Immediate(Immediate::Scalar(ScalarMaybeUndef::Undef)), layout::Abi::ScalarPair(..) => - Operand::Immediate(Value::ScalarPair( + Operand::Immediate(Immediate::ScalarPair( ScalarMaybeUndef::Undef, ScalarMaybeUndef::Undef, )), @@ -500,28 +406,28 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> let field = field.try_into().unwrap(); let field_layout = op.layout.field(self, field)?; if field_layout.is_zst() { - let val = Value::Scalar(Scalar::zst().into()); - return Ok(OpTy { op: Operand::Immediate(val), layout: field_layout }); + let immediate = Immediate::Scalar(Scalar::zst().into()); + return Ok(OpTy { op: Operand::Immediate(immediate), layout: field_layout }); } let offset = op.layout.fields.offset(field); - let value = match base { + let immediate = match base { // the field covers the entire type _ if offset.bytes() == 0 && field_layout.size == op.layout.size => base, // extract fields from types with `ScalarPair` ABI - Value::ScalarPair(a, b) => { + Immediate::ScalarPair(a, b) => { let val = if offset.bytes() == 0 { a } else { b }; - Value::Scalar(val) + Immediate::Scalar(val) }, - Value::Scalar(val) => + Immediate::Scalar(val) => bug!("field access on non aggregate {:#?}, {:#?}", val, op.layout), }; - Ok(OpTy { op: Operand::Immediate(value), layout: field_layout }) + Ok(OpTy { op: Operand::Immediate(immediate), layout: field_layout }) } pub fn operand_downcast( &self, op: OpTy<'tcx, M::PointerTag>, - variant: usize, + variant: VariantIdx, ) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>> { // Downcasts only change the layout Ok(match op.try_as_mplace() { @@ -535,17 +441,6 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> }) } - // Take an operand, representing a pointer, and dereference it to a place -- that - // will always be a MemPlace. - pub(super) fn deref_operand( - &self, - src: OpTy<'tcx, M::PointerTag>, - ) -> EvalResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { - let val = self.read_value(src)?; - trace!("deref to {} on {:?}", val.layout.ty, *val); - Ok(self.ref_to_mplace(val)?) 
- } - pub fn operand_projection( &self, base: OpTy<'tcx, M::PointerTag>, @@ -558,7 +453,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> Deref => self.deref_operand(base)?.into(), Subslice { .. } | ConstantIndex { .. } | Index(_) => if base.layout.is_zst() { OpTy { - op: Operand::Immediate(Value::Scalar(Scalar::zst().into())), + op: Operand::Immediate(Immediate::Scalar(Scalar::zst().into())), // the actual index doesn't matter, so we just pick a convenient one like 0 layout: base.layout.field(self, 0)?, } @@ -576,7 +471,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> /// When you know the layout of the local in advance, you can pass it as last argument pub fn access_local( &self, - frame: &super::Frame<'mir, 'tcx, M::PointerTag>, + frame: &super::Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>, local: mir::Local, layout: Option>, ) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>> { @@ -650,8 +545,10 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> .collect() } - // Also used e.g. when miri runs into a constant. - pub(super) fn const_value_to_op( + // Used when miri runs into a constant, and by CTFE. + // FIXME: CTFE should use allocations, then we can make this private (embed it into + // `eval_operand`, ideally). + pub(crate) fn const_value_to_op( &self, val: ConstValue<'tcx>, ) -> EvalResult<'tcx, Operand> { @@ -659,10 +556,10 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> match val { ConstValue::Unevaluated(def_id, substs) => { let instance = self.resolve(def_id, substs)?; - self.global_to_op(GlobalId { + Ok(*OpTy::from(self.const_eval_raw(GlobalId { instance, promoted: None, - }) + })?)) } ConstValue::ByRef(id, alloc, offset) => { // We rely on mutability being set correctly in that allocation to prevent writes @@ -672,38 +569,26 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> ).with_default_tag()) }, ConstValue::ScalarPair(a, b) => - Ok(Operand::Immediate(Value::ScalarPair(a.into(), b.into())).with_default_tag()), + Ok(Operand::Immediate(Immediate::ScalarPair( + a.into(), + b.into(), + )).with_default_tag()), ConstValue::Scalar(x) => - Ok(Operand::Immediate(Value::Scalar(x.into())).with_default_tag()), + Ok(Operand::Immediate(Immediate::Scalar(x.into())).with_default_tag()), } } - pub fn const_to_op( - &self, - cnst: &ty::Const<'tcx>, - ) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>> { - let op = self.const_value_to_op(cnst.val)?; - Ok(OpTy { op, layout: self.layout_of(cnst.ty)? }) - } - - pub(super) fn global_to_op( - &self, - gid: GlobalId<'tcx> - ) -> EvalResult<'tcx, Operand> { - let cv = self.const_eval(gid)?; - self.const_value_to_op(cv.val) - } /// Read discriminant, return the runtime value as well as the variant index. 
pub fn read_discriminant( &self, rval: OpTy<'tcx, M::PointerTag>, - ) -> EvalResult<'tcx, (u128, usize)> { + ) -> EvalResult<'tcx, (u128, VariantIdx)> { trace!("read_discriminant_value {:#?}", rval.layout); match rval.layout.variants { layout::Variants::Single { index } => { let discr_val = rval.layout.ty.ty_adt_def().map_or( - index as u128, + index.as_u32() as u128, |def| def.discriminant_for_variant(*self.tcx, index).val); return Ok((discr_val, index)); } @@ -712,15 +597,19 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> } // read raw discriminant value let discr_op = self.operand_field(rval, 0)?; - let discr_val = self.read_value(discr_op)?; - let raw_discr = discr_val.to_scalar()?; + let discr_val = self.read_immediate(discr_op)?; + let raw_discr = discr_val.to_scalar_or_undef(); trace!("discr value: {:?}", raw_discr); // post-process Ok(match rval.layout.variants { layout::Variants::Single { .. } => bug!(), layout::Variants::Tagged { .. } => { + let bits_discr = match raw_discr.to_bits(discr_val.layout.size) { + Ok(raw_discr) => raw_discr, + Err(_) => return err!(InvalidDiscriminant(raw_discr.erase_tag())), + }; let real_discr = if discr_val.layout.ty.is_signed() { - let i = raw_discr.to_bits(discr_val.layout.size)? as i128; + let i = bits_discr as i128; // going from layout tag type to typeck discriminant type // requires first sign extending with the layout discriminant let shift = 128 - discr_val.layout.size.bits(); @@ -730,21 +619,21 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> .ty_adt_def().expect("tagged layout corresponds to adt") .repr .discr_type(); - let discr_ty = layout::Integer::from_attr(self.tcx.tcx, discr_ty); + let discr_ty = layout::Integer::from_attr(self, discr_ty); let shift = 128 - discr_ty.size().bits(); let truncatee = sexted as u128; (truncatee << shift) >> shift } else { - raw_discr.to_bits(discr_val.layout.size)? + bits_discr }; // Make sure we catch invalid discriminants let index = rval.layout.ty .ty_adt_def() .expect("tagged layout for non adt") .discriminants(self.tcx.tcx) - .position(|var| var.val == real_discr) - .ok_or_else(|| EvalErrorKind::InvalidDiscriminant(real_discr))?; - (real_discr, index) + .find(|(_, var)| var.val == real_discr) + .ok_or_else(|| EvalErrorKind::InvalidDiscriminant(raw_discr.erase_tag()))?; + (real_discr, index.0) }, layout::Variants::NicheFilling { dataful_variant, @@ -752,33 +641,37 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> niche_start, .. 
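// Illustrative sketch: the in-memory tag of a `Tagged` enum lives in the layout's tag
// type, which may be narrower than the typeck discriminant type, so signed
// discriminants must be sign-extended from the raw bits. This reproduces the shift
// trick used in `read_discriminant` above; the function name is an assumption.

/// Sign-extend the low `bits` bits of `raw` to a full i128.
fn sign_extend(raw: u128, bits: u32) -> i128 {
    assert!(bits >= 1 && bits <= 128);
    let shift = 128 - bits;
    // Move the value's sign bit up to bit 127, then arithmetic-shift back down.
    ((raw << shift) as i128) >> shift
}

fn main() {
    // An i8 discriminant of -1 is stored as the tag byte 0xff.
    assert_eq!(sign_extend(0xff, 8), -1);
    // Positive values come back unchanged.
    assert_eq!(sign_extend(0x7f, 8), 127);
}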
} => { - let variants_start = *niche_variants.start() as u128; - let variants_end = *niche_variants.end() as u128; - let real_discr = match raw_discr { - Scalar::Ptr(_) => { - // The niche must be just 0 (which a pointer value never is) - assert!(niche_start == 0); - assert!(variants_start == variants_end); - dataful_variant as u128 + let variants_start = niche_variants.start().as_u32() as u128; + let variants_end = niche_variants.end().as_u32() as u128; + match raw_discr { + ScalarMaybeUndef::Scalar(Scalar::Ptr(ptr)) => { + // The niche must be just 0 (which an inbounds pointer value never is) + let ptr_valid = niche_start == 0 && variants_start == variants_end && + self.memory.check_bounds_ptr_maybe_dead(ptr).is_ok(); + if !ptr_valid { + return err!(InvalidDiscriminant(raw_discr.erase_tag())); + } + (dataful_variant.as_u32() as u128, dataful_variant) }, - Scalar::Bits { bits: raw_discr, size } => { + ScalarMaybeUndef::Scalar(Scalar::Bits { bits: raw_discr, size }) => { assert_eq!(size as u64, discr_val.layout.size.bytes()); - let discr = raw_discr.wrapping_sub(niche_start) + let adjusted_discr = raw_discr.wrapping_sub(niche_start) .wrapping_add(variants_start); - if variants_start <= discr && discr <= variants_end { - discr + if variants_start <= adjusted_discr && adjusted_discr <= variants_end { + let index = adjusted_discr as usize; + assert_eq!(index as u128, adjusted_discr); + assert!(index < rval.layout.ty + .ty_adt_def() + .expect("tagged layout for non adt") + .variants.len()); + (adjusted_discr, VariantIdx::from_usize(index)) } else { - dataful_variant as u128 + (dataful_variant.as_u32() as u128, dataful_variant) } }, - }; - let index = real_discr as usize; - assert_eq!(index as u128, real_discr); - assert!(index < rval.layout.ty - .ty_adt_def() - .expect("tagged layout for non adt") - .variants.len()); - (real_discr, index) + ScalarMaybeUndef::Undef => + return err!(InvalidDiscriminant(ScalarMaybeUndef::Undef)), + } } }) } diff --git a/src/librustc_mir/interpret/operator.rs b/src/librustc_mir/interpret/operator.rs index 5f4bafc39f..31824d5ec4 100644 --- a/src/librustc_mir/interpret/operator.rs +++ b/src/librustc_mir/interpret/operator.rs @@ -15,7 +15,7 @@ use rustc_apfloat::ieee::{Double, Single}; use rustc_apfloat::Float; use rustc::mir::interpret::{EvalResult, Scalar}; -use super::{EvalContext, PlaceTy, Value, Machine, ValTy}; +use super::{EvalContext, PlaceTy, Immediate, Machine, ImmTy}; impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { @@ -24,13 +24,13 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> pub fn binop_with_overflow( &mut self, op: mir::BinOp, - left: ValTy<'tcx, M::PointerTag>, - right: ValTy<'tcx, M::PointerTag>, + left: ImmTy<'tcx, M::PointerTag>, + right: ImmTy<'tcx, M::PointerTag>, dest: PlaceTy<'tcx, M::PointerTag>, ) -> EvalResult<'tcx> { - let (val, overflowed) = self.binary_op_val(op, left, right)?; - let val = Value::ScalarPair(val.into(), Scalar::from_bool(overflowed).into()); - self.write_value(val, dest) + let (val, overflowed) = self.binary_op_imm(op, left, right)?; + let val = Immediate::ScalarPair(val.into(), Scalar::from_bool(overflowed).into()); + self.write_immediate(val, dest) } /// Applies the binary operation `op` to the arguments and writes the result to the @@ -38,11 +38,11 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> pub fn binop_ignore_overflow( &mut self, op: mir::BinOp, - left: ValTy<'tcx, M::PointerTag>, - right: ValTy<'tcx, 
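// Illustrative sketch (assumed names): with a niche-filling layout, every variant other
// than the "dataful" one is encoded as an otherwise-invalid value of some field,
// starting at `niche_start`. Decoding maps the raw value back into the variant-index
// range and falls back to the dataful variant when the value is out of range, mirroring
// the wrapping arithmetic in the `NicheFilling` arm above.

fn decode_niche(
    raw: u128,
    niche_start: u128,
    variants_start: u128,
    variants_end: u128,
    dataful_variant: u128,
) -> u128 {
    let adjusted = raw.wrapping_sub(niche_start).wrapping_add(variants_start);
    if variants_start <= adjusted && adjusted <= variants_end {
        adjusted
    } else {
        // Any other value belongs to the variant that actually carries data.
        dataful_variant
    }
}

fn main() {
    // Think of `Option<&T>`: variant 0 (`None`) lives in the niche value 0 of the
    // pointer field, and variant 1 (`Some`) is the dataful variant.
    assert_eq!(decode_niche(0, 0, 0, 0, 1), 0); // null pointer bits => None
    assert_eq!(decode_niche(0x1000, 0, 0, 0, 1), 1); // real pointer bits => Some
}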
M::PointerTag>, + left: ImmTy<'tcx, M::PointerTag>, + right: ImmTy<'tcx, M::PointerTag>, dest: PlaceTy<'tcx, M::PointerTag>, ) -> EvalResult<'tcx> { - let (val, _overflowed) = self.binary_op_val(op, left, right)?; + let (val, _overflowed) = self.binary_op_imm(op, left, right)?; self.write_scalar(val, dest) } } @@ -283,13 +283,13 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> } /// Convenience wrapper that's useful when keeping the layout together with the - /// value. + /// immediate value. #[inline] - pub fn binary_op_val( + pub fn binary_op_imm( &self, bin_op: mir::BinOp, - left: ValTy<'tcx, M::PointerTag>, - right: ValTy<'tcx, M::PointerTag>, + left: ImmTy<'tcx, M::PointerTag>, + right: ImmTy<'tcx, M::PointerTag>, ) -> EvalResult<'tcx, (Scalar, bool)> { self.binary_op( bin_op, diff --git a/src/librustc_mir/interpret/place.rs b/src/librustc_mir/interpret/place.rs index a4bb15662d..1b47530eae 100644 --- a/src/librustc_mir/interpret/place.rs +++ b/src/librustc_mir/interpret/place.rs @@ -15,16 +15,15 @@ use std::convert::TryFrom; use std::hash::Hash; +use rustc::hir; use rustc::mir; use rustc::ty::{self, Ty}; -use rustc::ty::layout::{self, Size, Align, LayoutOf, TyLayout, HasDataLayout}; +use rustc::ty::layout::{self, Size, Align, LayoutOf, TyLayout, HasDataLayout, VariantIdx}; -use rustc::mir::interpret::{ - GlobalId, AllocId, Allocation, Scalar, EvalResult, Pointer, PointerArithmetic -}; use super::{ - EvalContext, Machine, AllocMap, - Value, ValTy, ScalarMaybeUndef, Operand, OpTy, MemoryKind + GlobalId, AllocId, Allocation, Scalar, EvalResult, Pointer, PointerArithmetic, + EvalContext, Machine, AllocMap, AllocationExtra, + RawConst, Immediate, ImmTy, ScalarMaybeUndef, Operand, OpTy, MemoryKind }; #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)] @@ -125,6 +124,12 @@ impl MemPlace { } } + /// Produces a Place that will error if attempted to be read from or written to + #[inline(always)] + pub fn null(cx: &impl HasDataLayout) -> Self { + Self::from_scalar_ptr(Scalar::ptr_null(cx), Align::from_bytes(1).unwrap()) + } + #[inline(always)] pub fn from_ptr(ptr: Pointer, align: Align) -> Self { Self::from_scalar_ptr(ptr.into(), align) @@ -144,28 +149,64 @@ impl MemPlace { // it now must be aligned. self.to_scalar_ptr_align().0.to_ptr() } + + /// Turn a mplace into a (thin or fat) pointer, as a reference, pointing to the same space. + /// This is the inverse of `ref_to_mplace`. 
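// Illustrative sketch: `binop_with_overflow` above evaluates the checked form of a
// binary operator and stores the result as a `ScalarPair` of (value, overflow flag),
// which is the shape `CheckedBinaryOp` expects. A plain-integer version of the same
// contract, with names chosen for the sketch:

fn checked_binop(op: char, l: u64, r: u64) -> (u64, bool) {
    match op {
        // `overflowing_*` returns the wrapped value plus an overflow flag,
        // matching the (result, overflowed) pair written to the destination.
        '+' => l.overflowing_add(r),
        '-' => l.overflowing_sub(r),
        '*' => l.overflowing_mul(r),
        _ => panic!("unsupported operator in sketch: {}", op),
    }
}

fn main() {
    assert_eq!(checked_binop('+', 1, 2), (3, false));
    assert_eq!(checked_binop('+', u64::MAX, 1), (0, true));
}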
+ #[inline(always)] + pub fn to_ref(self) -> Immediate { + match self.meta { + None => Immediate::Scalar(self.ptr.into()), + Some(meta) => Immediate::ScalarPair(self.ptr.into(), meta.into()), + } + } + + pub fn offset( + self, + offset: Size, + meta: Option>, + cx: &impl HasDataLayout, + ) -> EvalResult<'tcx, Self> { + Ok(MemPlace { + ptr: self.ptr.ptr_offset(offset, cx)?, + align: self.align.restrict_for_offset(offset), + meta, + }) + } } impl<'tcx, Tag> MPlaceTy<'tcx, Tag> { /// Produces a MemPlace that works for ZST but nothing else #[inline] - pub fn dangling(layout: TyLayout<'tcx>, cx: impl HasDataLayout) -> Self { + pub fn dangling(layout: TyLayout<'tcx>, cx: &impl HasDataLayout) -> Self { MPlaceTy { mplace: MemPlace::from_scalar_ptr( - Scalar::from_uint(layout.align.abi(), cx.pointer_size()), - layout.align + Scalar::from_uint(layout.align.abi.bytes(), cx.pointer_size()), + layout.align.abi ), layout } } - #[inline] - fn from_aligned_ptr(ptr: Pointer, layout: TyLayout<'tcx>) -> Self { - MPlaceTy { mplace: MemPlace::from_ptr(ptr, layout.align), layout } + pub fn offset( + self, + offset: Size, + meta: Option>, + layout: TyLayout<'tcx>, + cx: &impl HasDataLayout, + ) -> EvalResult<'tcx, Self> { + Ok(MPlaceTy { + mplace: self.mplace.offset(offset, meta, cx)?, + layout, + }) } #[inline] - pub(super) fn len(self, cx: impl HasDataLayout) -> EvalResult<'tcx, u64> { + fn from_aligned_ptr(ptr: Pointer, layout: TyLayout<'tcx>) -> Self { + MPlaceTy { mplace: MemPlace::from_ptr(ptr, layout.align.abi), layout } + } + + #[inline] + pub(super) fn len(self, cx: &impl HasDataLayout) -> EvalResult<'tcx, u64> { if self.layout.is_unsized() { // We need to consult `meta` metadata match self.layout.ty.sty { @@ -194,10 +235,10 @@ impl<'tcx, Tag> MPlaceTy<'tcx, Tag> { impl<'tcx, Tag: ::std::fmt::Debug> OpTy<'tcx, Tag> { #[inline(always)] - pub fn try_as_mplace(self) -> Result, Value> { + pub fn try_as_mplace(self) -> Result, Immediate> { match self.op { Operand::Indirect(mplace) => Ok(MPlaceTy { mplace, layout: self.layout }), - Operand::Immediate(value) => Err(value), + Operand::Immediate(imm) => Err(imm), } } @@ -209,17 +250,17 @@ impl<'tcx, Tag: ::std::fmt::Debug> OpTy<'tcx, Tag> { impl<'tcx, Tag: ::std::fmt::Debug> Place { /// Produces a Place that will error if attempted to be read from or written to - #[inline] - pub fn null(cx: impl HasDataLayout) -> Self { - Self::from_scalar_ptr(Scalar::ptr_null(cx), Align::from_bytes(1, 1).unwrap()) + #[inline(always)] + pub fn null(cx: &impl HasDataLayout) -> Self { + Place::Ptr(MemPlace::null(cx)) } - #[inline] + #[inline(always)] pub fn from_scalar_ptr(ptr: Scalar, align: Align) -> Self { Place::Ptr(MemPlace::from_scalar_ptr(ptr, align)) } - #[inline] + #[inline(always)] pub fn from_ptr(ptr: Pointer, align: Align) -> Self { Place::Ptr(MemPlace::from_ptr(ptr, align)) } @@ -254,59 +295,53 @@ impl<'tcx, Tag: ::std::fmt::Debug> PlaceTy<'tcx, Tag> { // separating the pointer tag for `impl Trait`, see https://github.com/rust-lang/rust/issues/54385 impl<'a, 'mir, 'tcx, Tag, M> EvalContext<'a, 'mir, 'tcx, M> where + // FIXME: Working around https://github.com/rust-lang/rust/issues/54385 Tag: ::std::fmt::Debug+Default+Copy+Eq+Hash+'static, M: Machine<'a, 'mir, 'tcx, PointerTag=Tag>, + // FIXME: Working around https://github.com/rust-lang/rust/issues/24159 M::MemoryMap: AllocMap, Allocation)>, + M::AllocExtra: AllocationExtra, { /// Take a value, which represents a (thin or fat) reference, and make it a place. - /// Alignment is just based on the type. 
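// Illustrative sketch (assumed names): after offsetting a place by `offset` bytes, the
// alignment that can still be guaranteed is the original alignment capped by the
// largest power of two dividing the offset, which is the role `restrict_for_offset`
// plays in `MemPlace::offset` above.

/// Alignment (in bytes) still guaranteed for `ptr + offset` when `ptr` is `align`-aligned.
fn restrict_align_for_offset(align: u64, offset: u64) -> u64 {
    assert!(align.is_power_of_two());
    if offset == 0 {
        align
    } else {
        // 1 << trailing_zeros(offset) is the largest power of two dividing `offset`.
        align.min(1u64 << offset.trailing_zeros())
    }
}

fn main() {
    // An 8-aligned pointer offset by 4 bytes is only known to be 4-aligned...
    assert_eq!(restrict_align_for_offset(8, 4), 4);
    // ...while offsetting by a multiple of 8 keeps the full alignment.
    assert_eq!(restrict_align_for_offset(8, 16), 8);
}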
This is the inverse of `create_ref`. + /// Alignment is just based on the type. This is the inverse of `MemPlace::to_ref()`. + /// This does NOT call the "deref" machine hook, so it does NOT count as a + /// deref as far as Stacked Borrows is concerned. Use `deref_operand` for that! pub fn ref_to_mplace( &self, - val: ValTy<'tcx, M::PointerTag>, + val: ImmTy<'tcx, M::PointerTag>, ) -> EvalResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { - let ptr = match val.to_scalar_ptr()? { - Scalar::Ptr(ptr) if M::ENABLE_PTR_TRACKING_HOOKS => { - // Machine might want to track the `*` operator - let tag = M::tag_dereference(self, ptr, val.layout.ty)?; - Scalar::Ptr(Pointer::new_with_tag(ptr.alloc_id, ptr.offset, tag)) - } - other => other, - }; - let pointee_type = val.layout.ty.builtin_deref(true).unwrap().ty; let layout = self.layout_of(pointee_type)?; - let align = layout.align; - let mplace = match *val { - Value::Scalar(_) => - MemPlace { ptr, align, meta: None }, - Value::ScalarPair(_, meta) => - MemPlace { ptr, align, meta: Some(meta.not_undef()?) }, + let mplace = MemPlace { + ptr: val.to_scalar_ptr()?, + align: layout.align.abi, + meta: val.to_meta()?, }; Ok(MPlaceTy { mplace, layout }) } - /// Turn a mplace into a (thin or fat) pointer, as a reference, pointing to the same space. - /// This is the inverse of `ref_to_mplace`. - pub fn create_ref( - &mut self, - place: MPlaceTy<'tcx, M::PointerTag>, - borrow_kind: Option, - ) -> EvalResult<'tcx, Value> { - let ptr = match place.ptr { - Scalar::Ptr(ptr) if M::ENABLE_PTR_TRACKING_HOOKS => { - // Machine might want to track the `&` operator - let (size, _) = self.size_and_align_of_mplace(place)? - .expect("create_ref cannot determine size"); - let tag = M::tag_reference(self, ptr, place.layout.ty, size, borrow_kind)?; - Scalar::Ptr(Pointer::new_with_tag(ptr.alloc_id, ptr.offset, tag)) - }, - other => other, + // Take an operand, representing a pointer, and dereference it to a place -- that + // will always be a MemPlace. Lives in `place.rs` because it creates a place. + // This calls the "deref" machine hook, and counts as a deref as far as + // Stacked Borrows is concerned. + pub fn deref_operand( + &self, + src: OpTy<'tcx, M::PointerTag>, + ) -> EvalResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { + let val = self.read_immediate(src)?; + trace!("deref to {} on {:?}", val.layout.ty, *val); + let mut place = self.ref_to_mplace(val)?; + // Pointer tag tracking might want to adjust the tag. + let mutbl = match val.layout.ty.sty { + // `builtin_deref` considers boxes immutable, that's useless for our purposes + ty::Ref(_, _, mutbl) => Some(mutbl), + ty::Adt(def, _) if def.is_box() => Some(hir::MutMutable), + ty::RawPtr(_) => None, + _ => bug!("Unexpected pointer type {}", val.layout.ty.sty), }; - Ok(match place.meta { - None => Value::Scalar(ptr.into()), - Some(meta) => Value::ScalarPair(ptr.into(), meta.into()), - }) + place.mplace.ptr = M::tag_dereference(self, place, mutbl)?; + Ok(place) } /// Offset a pointer to project to a field. Unlike place_field, this is always @@ -342,22 +377,27 @@ where // Offset may need adjustment for unsized fields let (meta, offset) = if field_layout.is_unsized() { // re-use parent metadata to determine dynamic field layout - let (_, align) = self.size_and_align_of(base.meta, field_layout)? - .expect("Fields cannot be extern types"); - (base.meta, offset.abi_align(align)) + let align = match self.size_and_align_of(base.meta, field_layout)? 
{ + Some((_, align)) => align, + None if offset == Size::ZERO => + // An extern type at offset 0, we fall back to its static alignment. + // FIXME: Once we have made decisions for how to handle size and alignment + // of `extern type`, this should be adapted. It is just a temporary hack + // to get some code to work that probably ought to work. + field_layout.align.abi, + None => + bug!("Cannot compute offset for extern type field at non-0 offset"), + }; + (base.meta, offset.align_to(align)) } else { // base.meta could be present; we might be accessing a sized field of an unsized // struct. (None, offset) }; - let ptr = base.ptr.ptr_offset(offset, self)?; - let align = base.align - // We do not look at `base.layout.align` nor `field_layout.align`, unlike - // codegen -- mostly to see if we can get away with that - .restrict_for_offset(offset); // must be last thing that happens - - Ok(MPlaceTy { mplace: MemPlace { ptr, align, meta }, layout: field_layout }) + // We do not look at `base.layout.align` nor `field_layout.align`, unlike + // codegen -- mostly to see if we can get away with that + base.offset(offset, meta, field_layout, self) } // Iterates over all fields of an array. Much more efficient than doing the @@ -375,13 +415,7 @@ where }; let layout = base.layout.field(self, 0)?; let dl = &self.tcx.data_layout; - Ok((0..len).map(move |i| { - let ptr = base.ptr.ptr_offset(i * stride, dl)?; - Ok(MPlaceTy { - mplace: MemPlace { ptr, align: base.align, meta: None }, - layout - }) - })) + Ok((0..len).map(move |i| base.offset(i * stride, None, layout, dl))) } pub fn mplace_subslice( @@ -400,7 +434,6 @@ where stride * from, _ => bug!("Unexpected layout of index access: {:#?}", base.layout), }; - let ptr = base.ptr.ptr_offset(from_offset, self)?; // Compute meta and new layout let inner_len = len - to - from; @@ -417,17 +450,13 @@ where bug!("cannot subslice non-array type: `{:?}`", base.layout.ty), }; let layout = self.layout_of(ty)?; - - Ok(MPlaceTy { - mplace: MemPlace { ptr, align: base.align, meta }, - layout - }) + base.offset(from_offset, meta, layout, self) } pub fn mplace_downcast( &self, base: MPlaceTy<'tcx, M::PointerTag>, - variant: usize, + variant: VariantIdx, ) -> EvalResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { // Downcasts only change the layout assert!(base.meta.is_none()); @@ -478,6 +507,8 @@ where /// Get the place of a field inside the place, and also the field's type. /// Just a convenience function, but used quite a bit. + /// This is the only projection that might have a side-effect: We cannot project + /// into the field of a local `ScalarPair`, we have to first allocate it. pub fn place_field( &mut self, base: PlaceTy<'tcx, M::PointerTag>, @@ -490,16 +521,16 @@ where } pub fn place_downcast( - &mut self, + &self, base: PlaceTy<'tcx, M::PointerTag>, - variant: usize, + variant: VariantIdx, ) -> EvalResult<'tcx, PlaceTy<'tcx, M::PointerTag>> { // Downcast just changes the layout Ok(match base.place { Place::Ptr(mplace) => self.mplace_downcast(MPlaceTy { mplace, layout: base.layout }, variant)?.into(), Place::Local { .. 
} => { - let layout = base.layout.for_variant(&self, variant); + let layout = base.layout.for_variant(self, variant); PlaceTy { layout, ..base } } }) @@ -535,16 +566,10 @@ where Ok(match *mir_place { Promoted(ref promoted) => { let instance = self.frame().instance; - let op = self.global_to_op(GlobalId { + self.const_eval_raw(GlobalId { instance, promoted: Some(promoted.0), - })?; - let mplace = op.to_mem_place(); // these are always in memory - let ty = self.monomorphize(promoted.1, self.substs()); - MPlaceTy { - mplace, - layout: self.layout_of(ty)?, - } + })? } Static(ref static_) => { @@ -618,60 +643,60 @@ where val: impl Into>, dest: PlaceTy<'tcx, M::PointerTag>, ) -> EvalResult<'tcx> { - self.write_value(Value::Scalar(val.into()), dest) + self.write_immediate(Immediate::Scalar(val.into()), dest) } - /// Write a value to a place + /// Write an immediate to a place #[inline(always)] - pub fn write_value( + pub fn write_immediate( &mut self, - src_val: Value, + src: Immediate, dest: PlaceTy<'tcx, M::PointerTag>, ) -> EvalResult<'tcx> { - self.write_value_no_validate(src_val, dest)?; + self.write_immediate_no_validate(src, dest)?; if M::enforce_validity(self) { // Data got changed, better make sure it matches the type! - self.validate_operand(self.place_to_op(dest)?, &mut vec![], None, /*const_mode*/false)?; + self.validate_operand(self.place_to_op(dest)?, vec![], None, /*const_mode*/false)?; } Ok(()) } - /// Write a value to a place. + /// Write an immediate to a place. /// If you use this you are responsible for validating that things got copied at the /// right type. - fn write_value_no_validate( + fn write_immediate_no_validate( &mut self, - src_val: Value, + src: Immediate, dest: PlaceTy<'tcx, M::PointerTag>, ) -> EvalResult<'tcx> { if cfg!(debug_assertions) { // This is a very common path, avoid some checks in release mode assert!(!dest.layout.is_unsized(), "Cannot write unsized data"); - match src_val { - Value::Scalar(ScalarMaybeUndef::Scalar(Scalar::Ptr(_))) => + match src { + Immediate::Scalar(ScalarMaybeUndef::Scalar(Scalar::Ptr(_))) => assert_eq!(self.pointer_size(), dest.layout.size, "Size mismatch when writing pointer"), - Value::Scalar(ScalarMaybeUndef::Scalar(Scalar::Bits { size, .. })) => + Immediate::Scalar(ScalarMaybeUndef::Scalar(Scalar::Bits { size, .. })) => assert_eq!(Size::from_bytes(size.into()), dest.layout.size, "Size mismatch when writing bits"), - Value::Scalar(ScalarMaybeUndef::Undef) => {}, // undef can have any size - Value::ScalarPair(_, _) => { + Immediate::Scalar(ScalarMaybeUndef::Undef) => {}, // undef can have any size + Immediate::ScalarPair(_, _) => { // FIXME: Can we check anything here? } } } - trace!("write_value: {:?} <- {:?}: {}", *dest, src_val, dest.layout.ty); + trace!("write_immediate: {:?} <- {:?}: {}", *dest, src, dest.layout.ty); - // See if we can avoid an allocation. This is the counterpart to `try_read_value`, + // See if we can avoid an allocation. This is the counterpart to `try_read_immediate`, // but not factored as a separate function. let mplace = match dest.place { Place::Local { frame, local } => { match *self.stack[frame].locals[local].access_mut()? { Operand::Immediate(ref mut dest_val) => { // Yay, we can just change the local directly. - *dest_val = src_val; + *dest_val = src; return Ok(()); }, Operand::Indirect(mplace) => mplace, // already in memory @@ -682,15 +707,15 @@ where let dest = MPlaceTy { mplace, layout: dest.layout }; // This is already in memory, write there. 
- self.write_value_to_mplace_no_validate(src_val, dest) + self.write_immediate_to_mplace_no_validate(src, dest) } - /// Write a value to memory. + /// Write an immediate to memory. /// If you use this you are responsible for validating that things git copied at the /// right type. - fn write_value_to_mplace_no_validate( + fn write_immediate_to_mplace_no_validate( &mut self, - value: Value, + value: Immediate, dest: MPlaceTy<'tcx, M::PointerTag>, ) -> EvalResult<'tcx> { let (ptr, ptr_align) = dest.to_scalar_ptr_align(); @@ -701,43 +726,50 @@ where // Nothing to do for ZSTs, other than checking alignment if dest.layout.is_zst() { - self.memory.check_align(ptr, ptr_align)?; - return Ok(()); + return self.memory.check_align(ptr, ptr_align); } + // check for integer pointers before alignment to report better errors let ptr = ptr.to_ptr()?; + self.memory.check_align(ptr.into(), ptr_align)?; + let tcx = &*self.tcx; // FIXME: We should check that there are dest.layout.size many bytes available in // memory. The code below is not sufficient, with enough padding it might not // cover all the bytes! match value { - Value::Scalar(scalar) => { + Immediate::Scalar(scalar) => { match dest.layout.abi { layout::Abi::Scalar(_) => {}, // fine - _ => bug!("write_value_to_mplace: invalid Scalar layout: {:#?}", + _ => bug!("write_immediate_to_mplace: invalid Scalar layout: {:#?}", dest.layout) } - - self.memory.write_scalar( - ptr, ptr_align.min(dest.layout.align), scalar, dest.layout.size + self.memory.get_mut(ptr.alloc_id)?.write_scalar( + tcx, ptr, scalar, dest.layout.size ) } - Value::ScalarPair(a_val, b_val) => { + Immediate::ScalarPair(a_val, b_val) => { let (a, b) = match dest.layout.abi { layout::Abi::ScalarPair(ref a, ref b) => (&a.value, &b.value), - _ => bug!("write_value_to_mplace: invalid ScalarPair layout: {:#?}", + _ => bug!("write_immediate_to_mplace: invalid ScalarPair layout: {:#?}", dest.layout) }; - let (a_size, b_size) = (a.size(&self), b.size(&self)); - let (a_align, b_align) = (a.align(&self), b.align(&self)); - let b_offset = a_size.abi_align(b_align); - let b_ptr = ptr.offset(b_offset, &self)?.into(); + let (a_size, b_size) = (a.size(self), b.size(self)); + let b_offset = a_size.align_to(b.align(self).abi); + let b_align = ptr_align.restrict_for_offset(b_offset); + let b_ptr = ptr.offset(b_offset, self)?; + + self.memory.check_align(b_ptr.into(), b_align)?; // It is tempting to verify `b_offset` against `layout.fields.offset(1)`, // but that does not work: We could be a newtype around a pair, then the // fields do not match the `ScalarPair` components. - self.memory.write_scalar(ptr, ptr_align.min(a_align), a_val, a_size)?; - self.memory.write_scalar(b_ptr, ptr_align.min(b_align), b_val, b_size) + self.memory + .get_mut(ptr.alloc_id)? + .write_scalar(tcx, ptr, a_val, a_size)?; + self.memory + .get_mut(b_ptr.alloc_id)? + .write_scalar(tcx, b_ptr, b_val, b_size) } } } @@ -754,7 +786,7 @@ where if M::enforce_validity(self) { // Data got changed, better make sure it matches the type! - self.validate_operand(self.place_to_op(dest)?, &mut vec![], None, /*const_mode*/false)?; + self.validate_operand(self.place_to_op(dest)?, vec![], None, /*const_mode*/false)?; } Ok(()) @@ -777,10 +809,10 @@ where "Layout mismatch when copying!\nsrc: {:#?}\ndest: {:#?}", src, dest); // Let us see if the layout is simple so we take a shortcut, avoid force_allocation. - let src = match self.try_read_value(src)? { + let src = match self.try_read_immediate(src)? 
{ Ok(src_val) => { // Yay, we got a value that we can write directly. - return self.write_value_no_validate(src_val, dest); + return self.write_immediate_no_validate(src_val, dest); } Err(mplace) => mplace, }; @@ -832,13 +864,15 @@ where if M::enforce_validity(self) { // Data got changed, better make sure it matches the type! - self.validate_operand(dest.into(), &mut vec![], None, /*const_mode*/false)?; + self.validate_operand(dest.into(), vec![], None, /*const_mode*/false)?; } Ok(()) } /// Make sure that a place is in memory, and return where it is. + /// If the place currently refers to a local that doesn't yet have a matching allocation, + /// create such an allocation. /// This is essentially `force_to_memplace`. pub fn force_allocation( &mut self, @@ -860,7 +894,7 @@ where let ptr = self.allocate(local_layout, MemoryKind::Stack)?; // We don't have to validate as we can assume the local // was already valid for its type. - self.write_value_to_mplace_no_validate(value, ptr)?; + self.write_immediate_to_mplace_no_validate(value, ptr)?; let mplace = ptr.mplace; // Update the local *self.stack[frame].locals[local].access_mut()? = @@ -880,14 +914,20 @@ where layout: TyLayout<'tcx>, kind: MemoryKind, ) -> EvalResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { - assert!(!layout.is_unsized(), "cannot alloc memory for unsized type"); - let ptr = self.memory.allocate(layout.size, layout.align, kind)?; - Ok(MPlaceTy::from_aligned_ptr(ptr, layout)) + if layout.is_unsized() { + assert!(self.tcx.features().unsized_locals, "cannot alloc memory for unsized type"); + // FIXME: What should we do here? We should definitely also tag! + Ok(MPlaceTy::dangling(layout, self)) + } else { + let ptr = self.memory.allocate(layout.size, layout.align.abi, kind)?; + let ptr = M::tag_new_allocation(self, ptr, kind)?; + Ok(MPlaceTy::from_aligned_ptr(ptr, layout)) + } } pub fn write_discriminant_index( &mut self, - variant_index: usize, + variant_index: VariantIdx, dest: PlaceTy<'tcx, M::PointerTag>, ) -> EvalResult<'tcx> { match dest.layout.variants { @@ -896,7 +936,7 @@ where } layout::Variants::Tagged { ref tag, .. } => { let adt_def = dest.layout.ty.ty_adt_def().unwrap(); - assert!(variant_index < adt_def.variants.len()); + assert!(variant_index.as_usize() < adt_def.variants.len()); let discr_val = adt_def .discriminant_for_variant(*self.tcx, variant_index) .val; @@ -904,7 +944,7 @@ where // raw discriminants for enums are isize or bigger during // their computation, but the in-memory tag is the smallest possible // representation - let size = tag.value.size(self.tcx.tcx); + let size = tag.value.size(self); let shift = 128 - size.bits(); let discr_val = (discr_val << shift) >> shift; @@ -917,11 +957,14 @@ where niche_start, .. 
} => { - assert!(variant_index < dest.layout.ty.ty_adt_def().unwrap().variants.len()); + assert!( + variant_index.as_usize() < dest.layout.ty.ty_adt_def().unwrap().variants.len(), + ); if variant_index != dataful_variant { let niche_dest = self.place_field(dest, 0)?; - let niche_value = ((variant_index - niche_variants.start()) as u128) + let niche_value = variant_index.as_u32() - niche_variants.start().as_u32(); + let niche_value = (niche_value as u128) .wrapping_add(niche_start); self.write_scalar( Scalar::from_uint(niche_value, niche_dest.layout.size), @@ -950,6 +993,19 @@ where Ok(OpTy { op, layout: place.layout }) } + pub fn raw_const_to_mplace( + &self, + raw: RawConst<'tcx>, + ) -> EvalResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { + // This must be an allocation in `tcx` + assert!(self.tcx.alloc_map.lock().get(raw.alloc_id).is_some()); + let layout = self.layout_of(raw.ty)?; + Ok(MPlaceTy::from_aligned_ptr( + Pointer::new(raw.alloc_id, Size::ZERO).with_default_tag(), + layout, + )) + } + /// Turn a place with a `dyn Trait` type into a place with the actual dynamic type. /// Also return some more information so drop doesn't have to run the same code twice. pub(super) fn unpack_dyn_trait(&self, mplace: MPlaceTy<'tcx, M::PointerTag>) @@ -962,7 +1018,8 @@ where if cfg!(debug_assertions) { let (size, align) = self.read_size_and_align_from_vtable(vtable)?; assert_eq!(size, layout.size); - assert_eq!(align.abi(), layout.align.abi()); // only ABI alignment is preserved + // only ABI alignment is preserved + assert_eq!(align, layout.align.abi); } let mplace = MPlaceTy { diff --git a/src/librustc_mir/interpret/snapshot.rs b/src/librustc_mir/interpret/snapshot.rs index 047a0125f7..f9ce7b4319 100644 --- a/src/librustc_mir/interpret/snapshot.rs +++ b/src/librustc_mir/interpret/snapshot.rs @@ -6,9 +6,8 @@ // it is not used by the general miri engine, just by CTFE. use std::hash::{Hash, Hasher}; -use std::mem; -use rustc::ich::{StableHashingContext, StableHashingContextProvider}; +use rustc::ich::StableHashingContextProvider; use rustc::mir; use rustc::mir::interpret::{ AllocId, Pointer, Scalar, @@ -20,12 +19,12 @@ use rustc::ty::{self, TyCtxt}; use rustc::ty::layout::Align; use rustc_data_structures::fx::FxHashSet; use rustc_data_structures::indexed_vec::IndexVec; -use rustc_data_structures::stable_hasher::{HashStable, StableHasher, StableHasherResult}; +use rustc_data_structures::stable_hasher::{HashStable, StableHasher}; use syntax::ast::Mutability; use syntax::source_map::Span; use super::eval_context::{LocalValue, StackPopCleanup}; -use super::{Frame, Memory, Operand, MemPlace, Place, Value, ScalarMaybeUndef}; +use super::{Frame, Memory, Operand, MemPlace, Place, Immediate, ScalarMaybeUndef}; use const_eval::CompileTimeInterpreter; #[derive(Default)] @@ -196,11 +195,6 @@ impl<'a, Ctx> Snapshot<'a, Ctx> for Scalar } } -impl_stable_hash_for!(enum ::interpret::ScalarMaybeUndef { - Scalar(v), - Undef -}); - impl_snapshot_for!(enum ScalarMaybeUndef { Scalar(s), Undef, @@ -217,23 +211,10 @@ impl_snapshot_for!(struct MemPlace { align -> *align, // just copy alignment verbatim }); -// Can't use the macro here because that does not support named enum fields. 
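// Illustrative sketch (assumed names): writing a niche-encoded discriminant is the
// inverse of the decoding shown earlier -- map the variant index into the niche value
// range, as `write_discriminant_index` does above; the dataful variant needs no tag
// write at all, since its data is what distinguishes it.

fn encode_niche(
    variant_index: u32,
    variants_start: u32,
    niche_start: u128,
    dataful_variant: u32,
) -> Option<u128> {
    if variant_index == dataful_variant {
        // Nothing to write for the dataful variant.
        None
    } else {
        Some(((variant_index - variants_start) as u128).wrapping_add(niche_start))
    }
}

fn main() {
    // Same `Option<&T>`-like layout as before: `None` (variant 0) is stored as the
    // niche value 0, while `Some` (variant 1) writes no tag.
    assert_eq!(encode_niche(0, 0, 0, 1), Some(0));
    assert_eq!(encode_niche(1, 0, 0, 1), None);
}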
-impl<'a> HashStable> for Place { - fn hash_stable( - &self, hcx: &mut StableHashingContext<'a>, - hasher: &mut StableHasher) - { - mem::discriminant(self).hash_stable(hcx, hasher); - match self { - Place::Ptr(mem_place) => mem_place.hash_stable(hcx, hasher), - - Place::Local { frame, local } => { - frame.hash_stable(hcx, hasher); - local.hash_stable(hcx, hasher); - }, - } - } -} +impl_stable_hash_for!(enum ::interpret::Place { + Ptr(mem_place), + Local { frame, local }, +}); impl<'a, Ctx> Snapshot<'a, Ctx> for Place where Ctx: SnapshotContext<'a>, { @@ -251,11 +232,11 @@ impl<'a, Ctx> Snapshot<'a, Ctx> for Place } } -impl_stable_hash_for!(enum ::interpret::Value { +impl_stable_hash_for!(enum ::interpret::Immediate { Scalar(x), ScalarPair(x, y), }); -impl_snapshot_for!(enum Value { +impl_snapshot_for!(enum Immediate { Scalar(s), ScalarPair(s, t), }); @@ -317,20 +298,10 @@ impl<'a, Ctx> Snapshot<'a, Ctx> for &'a Allocation } } -// Can't use the macro here because that does not support named enum fields. -impl<'a> HashStable> for StackPopCleanup { - fn hash_stable( - &self, - hcx: &mut StableHashingContext<'a>, - hasher: &mut StableHasher) - { - mem::discriminant(self).hash_stable(hcx, hasher); - match self { - StackPopCleanup::Goto(ref block) => block.hash_stable(hcx, hasher), - StackPopCleanup::None { cleanup } => cleanup.hash_stable(hcx, hasher), - } - } -} +impl_stable_hash_for!(enum ::interpret::eval_context::StackPopCleanup { + Goto(block), + None { cleanup }, +}); #[derive(Eq, PartialEq)] struct FrameSnapshot<'a, 'tcx: 'a> { @@ -343,28 +314,18 @@ struct FrameSnapshot<'a, 'tcx: 'a> { stmt: usize, } -// Not using the macro because that does not support types depending on two lifetimes -impl<'a, 'mir, 'tcx: 'mir> HashStable> for Frame<'mir, 'tcx> { - fn hash_stable( - &self, - hcx: &mut StableHashingContext<'a>, - hasher: &mut StableHasher) { +impl_stable_hash_for!(impl<'tcx, 'mir: 'tcx> for struct Frame<'mir, 'tcx> { + mir, + instance, + span, + return_to_block, + return_place -> (return_place.as_ref().map(|r| &**r)), + locals, + block, + stmt, + extra, +}); - let Frame { - mir, - instance, - span, - return_to_block, - return_place, - locals, - block, - stmt, - } = self; - - (mir, instance, span, return_to_block).hash_stable(hcx, hasher); - (return_place.as_ref().map(|r| &**r), locals, block, stmt).hash_stable(hcx, hasher); - } -} impl<'a, 'mir, 'tcx, Ctx> Snapshot<'a, Ctx> for &'a Frame<'mir, 'tcx> where Ctx: SnapshotContext<'a>, { @@ -380,6 +341,7 @@ impl<'a, 'mir, 'tcx, Ctx> Snapshot<'a, Ctx> for &'a Frame<'mir, 'tcx> locals, block, stmt, + extra: _, } = self; FrameSnapshot { @@ -443,21 +405,11 @@ impl<'a, 'mir, 'tcx> Hash for EvalSnapshot<'a, 'mir, 'tcx> } } -// Not using the macro because we need special handling for `memory`, which the macro -// does not support at the same time as the extra bounds on the type. 
-impl<'a, 'b, 'mir, 'tcx> HashStable> - for EvalSnapshot<'a, 'mir, 'tcx> -{ - fn hash_stable( - &self, - hcx: &mut StableHashingContext<'b>, - hasher: &mut StableHasher) - { - // Not hashing memory: Avoid hashing memory all the time during execution - let EvalSnapshot{ memory: _, stack } = self; - stack.hash_stable(hcx, hasher); - } -} +impl_stable_hash_for!(impl<'tcx, 'b, 'mir> for struct EvalSnapshot<'b, 'mir, 'tcx> { + // Not hashing memory: Avoid hashing memory all the time during execution + memory -> _, + stack, +}); impl<'a, 'mir, 'tcx> Eq for EvalSnapshot<'a, 'mir, 'tcx> {} diff --git a/src/librustc_mir/interpret/step.rs b/src/librustc_mir/interpret/step.rs index 1bab536e3e..8814118f65 100644 --- a/src/librustc_mir/interpret/step.rs +++ b/src/librustc_mir/interpret/step.rs @@ -118,14 +118,17 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> // interpreter is solely intended for borrowck'ed code. FakeRead(..) => {} - // Validity checks. - Validate(op, ref places) => { - for operand in places { - M::validation_op(self, op, operand)?; - } + // Stacked Borrows. + Retag { fn_entry, ref place } => { + let dest = self.eval_place(place)?; + M::retag(self, fn_entry, dest)?; + } + EscapeToRaw(ref op) => { + let op = self.eval_operand(op, None)?; + M::escape_to_raw(self, op)?; } - EndRegion(..) => {} + // Statements we do not track. AscribeUserType(..) => {} // Defined to do nothing. These are added by optimization passes, to avoid changing the @@ -160,9 +163,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> BinaryOp(bin_op, ref left, ref right) => { let layout = if binop_left_homogeneous(bin_op) { Some(dest.layout) } else { None }; - let left = self.read_value(self.eval_operand(left, layout)?)?; + let left = self.read_immediate(self.eval_operand(left, layout)?)?; let layout = if binop_right_homogeneous(bin_op) { Some(left.layout) } else { None }; - let right = self.read_value(self.eval_operand(right, layout)?)?; + let right = self.read_immediate(self.eval_operand(right, layout)?)?; self.binop_ignore_overflow( bin_op, left, @@ -173,9 +176,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> CheckedBinaryOp(bin_op, ref left, ref right) => { // Due to the extra boolean in the result, we can never reuse the `dest.layout`. - let left = self.read_value(self.eval_operand(left, None)?)?; + let left = self.read_immediate(self.eval_operand(left, None)?)?; let layout = if binop_right_homogeneous(bin_op) { Some(left.layout) } else { None }; - let right = self.read_value(self.eval_operand(right, layout)?)?; + let right = self.read_immediate(self.eval_operand(right, layout)?)?; self.binop_with_overflow( bin_op, left, @@ -186,7 +189,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> UnaryOp(un_op, ref operand) => { // The operand always has the same type as the result. 
- let val = self.read_value(self.eval_operand(operand, Some(dest.layout))?)?; + let val = self.read_immediate(self.eval_operand(operand, Some(dest.layout))?)?; let val = self.unary_op(un_op, val.to_scalar()?, dest.layout)?; self.write_scalar(val, dest)?; } @@ -218,7 +221,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> Repeat(ref operand, _) => { let op = self.eval_operand(operand, None)?; let dest = self.force_allocation(dest)?; - let length = dest.len(&self)?; + let length = dest.len(self)?; if length > 0 { // write the first @@ -228,7 +231,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> if length > 1 { // copy the rest let (dest, dest_align) = first.to_scalar_ptr_align(); - let rest = dest.ptr_offset(first.layout.size, &self)?; + let rest = dest.ptr_offset(first.layout.size, self)?; self.memory.copy_repeatedly( dest, dest_align, rest, dest_align, first.layout.size, length - 1, true )?; @@ -240,7 +243,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> // FIXME(CTFE): don't allow computing the length of arrays in const eval let src = self.eval_place(place)?; let mplace = self.force_allocation(src)?; - let len = mplace.len(&self)?; + let len = mplace.len(self)?; let size = self.pointer_size(); self.write_scalar( Scalar::from_uint(len, size), @@ -248,11 +251,10 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> )?; } - Ref(_, borrow_kind, ref place) => { + Ref(_, _, ref place) => { let src = self.eval_place(place)?; let val = self.force_allocation(src)?; - let val = self.create_ref(val, Some(borrow_kind))?; - self.write_value(val, dest)?; + self.write_immediate(val.to_ref(), dest)?; } NullaryOp(mir::NullOp::Box, _) => { diff --git a/src/librustc_mir/interpret/terminator.rs b/src/librustc_mir/interpret/terminator.rs index faeeb24c6c..300f3d639b 100644 --- a/src/librustc_mir/interpret/terminator.rs +++ b/src/librustc_mir/interpret/terminator.rs @@ -17,7 +17,7 @@ use rustc_target::spec::abi::Abi; use rustc::mir::interpret::{EvalResult, PointerArithmetic, EvalErrorKind, Scalar}; use super::{ - EvalContext, Machine, Value, OpTy, PlaceTy, MPlaceTy, Operand, StackPopCleanup + EvalContext, Machine, Immediate, OpTy, PlaceTy, MPlaceTy, Operand, StackPopCleanup }; impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { @@ -51,7 +51,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> ref targets, .. } => { - let discr = self.read_value(self.eval_operand(discr, None)?)?; + let discr = self.read_immediate(self.eval_operand(discr, None)?)?; trace!("SwitchInt({:?})", *discr); // Branch to the `otherwise` case by default, if no match is found. @@ -138,7 +138,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> target, .. } => { - let cond_val = self.read_value(self.eval_operand(cond, None)?)? + let cond_val = self.read_immediate(self.eval_operand(cond, None)?)? .to_scalar()?.to_bool()?; if expected == cond_val { self.goto_block(Some(target))?; @@ -147,10 +147,10 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> use rustc::mir::interpret::EvalErrorKind::*; return match *msg { BoundsCheck { ref len, ref index } => { - let len = self.read_value(self.eval_operand(len, None)?) + let len = self.read_immediate(self.eval_operand(len, None)?) .expect("can't eval len").to_scalar()? .to_bits(self.memory().pointer_size())? 
as u64; - let index = self.read_value(self.eval_operand(index, None)?) + let index = self.read_immediate(self.eval_operand(index, None)?) .expect("can't eval index").to_scalar()? .to_bits(self.memory().pointer_size())? as u64; err!(BoundsCheck { len, index }) @@ -182,6 +182,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> } fn check_argument_compat( + rust_abi: bool, caller: TyLayout<'tcx>, callee: TyLayout<'tcx>, ) -> bool { @@ -189,13 +190,20 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> // No question return true; } + if !rust_abi { + // Don't risk anything + return false; + } // Compare layout match (&caller.abi, &callee.abi) { + // Different valid ranges are okay (once we enforce validity, + // that will take care to make it UB to leave the range, just + // like for transmute). (layout::Abi::Scalar(ref caller), layout::Abi::Scalar(ref callee)) => - // Different valid ranges are okay (once we enforce validity, - // that will take care to make it UB to leave the range, just - // like for transmute). caller.value == callee.value, + (layout::Abi::ScalarPair(ref caller1, ref caller2), + layout::Abi::ScalarPair(ref callee1, ref callee2)) => + caller1.value == callee1.value && caller2.value == callee2.value, // Be conservative _ => false } @@ -204,22 +212,22 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> /// Pass a single argument, checking the types for compatibility. fn pass_argument( &mut self, - skip_zst: bool, + rust_abi: bool, caller_arg: &mut impl Iterator>, callee_arg: PlaceTy<'tcx, M::PointerTag>, ) -> EvalResult<'tcx> { - if skip_zst && callee_arg.layout.is_zst() { + if rust_abi && callee_arg.layout.is_zst() { // Nothing to do. trace!("Skipping callee ZST"); return Ok(()); } let caller_arg = caller_arg.next() .ok_or_else(|| EvalErrorKind::FunctionArgCountMismatch)?; - if skip_zst { + if rust_abi { debug_assert!(!caller_arg.layout.is_zst(), "ZSTs must have been already filtered out"); } // Now, check - if !Self::check_argument_compat(caller_arg.layout, callee_arg.layout) { + if !Self::check_argument_compat(rust_abi, caller_arg.layout, callee_arg.layout) { return err!(FunctionArgMismatch(caller_arg.layout.ty, callee_arg.layout.ty)); } // We allow some transmutes here @@ -256,6 +264,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> self.dump_place(*dest); Ok(()) } + ty::InstanceDef::VtableShim(..) | ty::InstanceDef::ClosureOnceShim { .. } | ty::InstanceDef::FnPtrShim(..) | ty::InstanceDef::DropGlue(..) | @@ -318,7 +327,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> // Figure out how to pass which arguments. // We have two iterators: Where the arguments come from, // and where they go to. 
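The new `rust_abi` flag changes `check_argument_compat` from a pure layout comparison into a rule that only tolerates layout mismatches for the Rust ABI, and even then only when the scalar kinds line up. A self-contained sketch of that rule with stand-in types (these enums are placeholders, not rustc's `layout::Abi`):

```rust
#[derive(Clone, Copy, PartialEq, Eq)]
enum ScalarKind { Int, Float, Ptr }

#[derive(Clone, Copy, PartialEq, Eq)]
enum Abi {
    Scalar(ScalarKind),
    ScalarPair(ScalarKind, ScalarKind),
    Aggregate,
}

fn check_argument_compat(rust_abi: bool, caller: Abi, callee: Abi) -> bool {
    if caller == callee {
        return true; // identical layouts: no question
    }
    if !rust_abi {
        return false; // foreign ABI: don't risk anything
    }
    match (caller, callee) {
        (Abi::Scalar(a), Abi::Scalar(b)) => a == b,
        (Abi::ScalarPair(a1, a2), Abi::ScalarPair(b1, b2)) => a1 == b1 && a2 == b2,
        _ => false, // be conservative
    }
}

fn main() {
    assert!(check_argument_compat(true, Abi::Scalar(ScalarKind::Int), Abi::Scalar(ScalarKind::Int)));
    assert!(!check_argument_compat(false, Abi::Scalar(ScalarKind::Int), Abi::Scalar(ScalarKind::Ptr)));
}
```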
- let skip_zst = match caller_abi { + let rust_abi = match caller_abi { Abi::Rust | Abi::RustCall => true, _ => false }; @@ -343,7 +352,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> }; // Skip ZSTs let mut caller_iter = caller_args.iter() - .filter(|op| !skip_zst || !op.layout.is_zst()) + .filter(|op| !rust_abi || !op.layout.is_zst()) .map(|op| *op); // Now we have to spread them out across the callee's locals, @@ -358,11 +367,11 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> // Must be a tuple for i in 0..dest.layout.fields.count() { let dest = self.place_field(dest, i as u64)?; - self.pass_argument(skip_zst, &mut caller_iter, dest)?; + self.pass_argument(rust_abi, &mut caller_iter, dest)?; } } else { // Normal argument - self.pass_argument(skip_zst, &mut caller_iter, dest)?; + self.pass_argument(rust_abi, &mut caller_iter, dest)?; } } // Now we should have no more caller args @@ -373,7 +382,11 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> // Don't forget to check the return type! if let Some(caller_ret) = dest { let callee_ret = self.eval_place(&mir::Place::Local(mir::RETURN_PLACE))?; - if !Self::check_argument_compat(caller_ret.layout, callee_ret.layout) { + if !Self::check_argument_compat( + rust_abi, + caller_ret.layout, + callee_ret.layout, + ) { return err!(FunctionRetMismatch( caller_ret.layout.ty, callee_ret.layout.ty )); @@ -400,12 +413,12 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> // cannot use the shim here, because that will only result in infinite recursion ty::InstanceDef::Virtual(_, idx) => { let ptr_size = self.pointer_size(); - let ptr_align = self.tcx.data_layout.pointer_align; - let ptr = self.ref_to_mplace(self.read_value(args[0])?)?; + let ptr = self.deref_operand(args[0])?; let vtable = ptr.vtable()?; - let fn_ptr = self.memory.read_ptr_sized( - vtable.offset(ptr_size * (idx as u64 + 3), &self)?, - ptr_align + self.memory.check_align(vtable.into(), self.tcx.data_layout.pointer_align.abi)?; + let fn_ptr = self.memory.get(vtable.alloc_id)?.read_ptr_sized( + self, + vtable.offset(ptr_size * (idx as u64 + 3), self)?, )?.to_ptr()?; let instance = self.memory.get_fn(fn_ptr)?; @@ -415,8 +428,8 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> let mut args = args.to_vec(); let pointee = args[0].layout.ty.builtin_deref(true).unwrap().ty; let fake_fat_ptr_ty = self.tcx.mk_mut_ptr(pointee); - args[0].layout = self.layout_of(fake_fat_ptr_ty)?.field(&self, 0)?; - args[0].op = Operand::Immediate(Value::Scalar(ptr.ptr.into())); // strip vtable + args[0].layout = self.layout_of(fake_fat_ptr_ty)?.field(self, 0)?; + args[0].op = Operand::Immediate(Immediate::Scalar(ptr.ptr.into())); // strip vtable trace!("Patched self operand to {:#?}", args[0]); // recurse with concrete function self.eval_fn_call(instance, span, caller_abi, &args, dest, ret) @@ -446,15 +459,12 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> }; let arg = OpTy { - op: Operand::Immediate(self.create_ref( - place, - None // this is a "raw reference" - )?), + op: Operand::Immediate(place.to_ref()), layout: self.layout_of(self.tcx.mk_mut_ptr(place.layout.ty))?, }; let ty = self.tcx.mk_unit(); // return type is () - let dest = MPlaceTy::dangling(self.layout_of(ty)?, &self); + let dest = MPlaceTy::dangling(self.layout_of(ty)?, self); self.eval_fn_call( instance, diff --git 
a/src/librustc_mir/interpret/traits.rs b/src/librustc_mir/interpret/traits.rs index a2d4eee284..bda585b8ed 100644 --- a/src/librustc_mir/interpret/traits.rs +++ b/src/librustc_mir/interpret/traits.rs @@ -42,10 +42,10 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> let layout = self.layout_of(ty)?; assert!(!layout.is_unsized(), "can't create a vtable for an unsized type"); let size = layout.size.bytes(); - let align = layout.align.abi(); + let align = layout.align.abi.bytes(); let ptr_size = self.pointer_size(); - let ptr_align = self.tcx.data_layout.pointer_align; + let ptr_align = self.tcx.data_layout.pointer_align.abi; // ///////////////////////////////////////////////////////////////////////////////////////// // If you touch this code, be sure to also make the corresponding changes to // `get_vtable` in rust_codegen_llvm/meth.rs @@ -54,24 +54,35 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> ptr_size * (3 + methods.len() as u64), ptr_align, MemoryKind::Vtable, - )?; + )?.with_default_tag(); + let tcx = &*self.tcx; - let drop = ::monomorphize::resolve_drop_in_place(*self.tcx, ty); - let drop = self.memory.create_fn_alloc(drop); - self.memory.write_ptr_sized(vtable, ptr_align, Scalar::Ptr(drop).into())?; + let drop = ::monomorphize::resolve_drop_in_place(*tcx, ty); + let drop = self.memory.create_fn_alloc(drop).with_default_tag(); + // no need to do any alignment checks on the memory accesses below, because we know the + // allocation is correctly aligned as we created it above. Also we're only offsetting by + // multiples of `ptr_align`, which means that it will stay aligned to `ptr_align`. + self.memory + .get_mut(vtable.alloc_id)? + .write_ptr_sized(tcx, vtable, Scalar::Ptr(drop).into())?; - let size_ptr = vtable.offset(ptr_size, &self)?; - self.memory.write_ptr_sized(size_ptr, ptr_align, Scalar::from_uint(size, ptr_size).into())?; - let align_ptr = vtable.offset(ptr_size * 2, &self)?; - self.memory.write_ptr_sized(align_ptr, ptr_align, - Scalar::from_uint(align, ptr_size).into())?; + let size_ptr = vtable.offset(ptr_size, self)?; + self.memory + .get_mut(size_ptr.alloc_id)? + .write_ptr_sized(tcx, size_ptr, Scalar::from_uint(size, ptr_size).into())?; + let align_ptr = vtable.offset(ptr_size * 2, self)?; + self.memory + .get_mut(align_ptr.alloc_id)? + .write_ptr_sized(tcx, align_ptr, Scalar::from_uint(align, ptr_size).into())?; for (i, method) in methods.iter().enumerate() { if let Some((def_id, substs)) = *method { let instance = self.resolve(def_id, substs)?; - let fn_ptr = self.memory.create_fn_alloc(instance); - let method_ptr = vtable.offset(ptr_size * (3 + i as u64), &self)?; - self.memory.write_ptr_sized(method_ptr, ptr_align, Scalar::Ptr(fn_ptr).into())?; + let fn_ptr = self.memory.create_fn_alloc(instance).with_default_tag(); + let method_ptr = vtable.offset(ptr_size * (3 + i as u64), self)?; + self.memory + .get_mut(method_ptr.alloc_id)? 
+ .write_ptr_sized(tcx, method_ptr, Scalar::Ptr(fn_ptr).into())?; } } @@ -87,8 +98,11 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> vtable: Pointer, ) -> EvalResult<'tcx, (ty::Instance<'tcx>, ty::Ty<'tcx>)> { // we don't care about the pointee type, we just want a pointer - let pointer_align = self.tcx.data_layout.pointer_align; - let drop_fn = self.memory.read_ptr_sized(vtable, pointer_align)?.to_ptr()?; + self.memory.check_align(vtable.into(), self.tcx.data_layout.pointer_align.abi)?; + let drop_fn = self.memory + .get(vtable.alloc_id)? + .read_ptr_sized(self, vtable)? + .to_ptr()?; let drop_instance = self.memory.get_fn(drop_fn)?; trace!("Found drop fn: {:?}", drop_instance); let fn_sig = drop_instance.ty(*self.tcx).fn_sig(*self.tcx); @@ -103,13 +117,14 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> vtable: Pointer, ) -> EvalResult<'tcx, (Size, Align)> { let pointer_size = self.pointer_size(); - let pointer_align = self.tcx.data_layout.pointer_align; - let size = self.memory.read_ptr_sized(vtable.offset(pointer_size, self)?,pointer_align)? + self.memory.check_align(vtable.into(), self.tcx.data_layout.pointer_align.abi)?; + let alloc = self.memory.get(vtable.alloc_id)?; + let size = alloc.read_ptr_sized(self, vtable.offset(pointer_size, self)?)? .to_bits(pointer_size)? as u64; - let align = self.memory.read_ptr_sized( + let align = alloc.read_ptr_sized( + self, vtable.offset(pointer_size * 2, self)?, - pointer_align )?.to_bits(pointer_size)? as u64; - Ok((Size::from_bytes(size), Align::from_bytes(align, align).unwrap())) + Ok((Size::from_bytes(size), Align::from_bytes(align).unwrap())) } } diff --git a/src/librustc_mir/interpret/validity.rs b/src/librustc_mir/interpret/validity.rs index c75fea4fde..d98d05bc01 100644 --- a/src/librustc_mir/interpret/validity.rs +++ b/src/librustc_mir/interpret/validity.rs @@ -10,22 +10,23 @@ use std::fmt::Write; use std::hash::Hash; +use std::ops::RangeInclusive; use syntax_pos::symbol::Symbol; -use rustc::ty::layout::{self, Size, Align, TyLayout}; +use rustc::ty::layout::{self, Size, Align, TyLayout, LayoutOf, VariantIdx}; use rustc::ty; use rustc_data_structures::fx::FxHashSet; use rustc::mir::interpret::{ - Scalar, AllocType, EvalResult, EvalErrorKind + Scalar, AllocType, EvalResult, EvalErrorKind, }; use super::{ - ValTy, OpTy, MPlaceTy, Machine, EvalContext, ScalarMaybeUndef + OpTy, Machine, EvalContext, ValueVisitor, }; macro_rules! validation_failure { ($what:expr, $where:expr, $details:expr) => {{ - let where_ = path_format($where); + let where_ = path_format(&$where); let where_ = if where_.is_empty() { String::new() } else { @@ -37,7 +38,7 @@ macro_rules! validation_failure { ))) }}; ($what:expr, $where:expr) => {{ - let where_ = path_format($where); + let where_ = path_format(&$where); let where_ = if where_.is_empty() { String::new() } else { @@ -66,18 +67,20 @@ macro_rules! try_validation { }} } -/// We want to show a nice path to the invalid field for diagnotsics, +/// We want to show a nice path to the invalid field for diagnostics, /// but avoid string operations in the happy case where no error happens. /// So we track a `Vec` where `PathElem` contains all the data we /// need to later print something for the user. 
#[derive(Copy, Clone, Debug)] pub enum PathElem { Field(Symbol), + Variant(Symbol), ClosureVar(Symbol), ArrayElem(usize), TupleElem(usize), Deref, Tag, + DynDowncast, } /// State for tracking recursive validation of references @@ -97,15 +100,6 @@ impl<'tcx, Tag: Copy+Eq+Hash> RefTracking<'tcx, Tag> { } } -// Adding a Deref and making a copy of the path to be put into the queue -// always go together. This one does it with only new allocation. -fn path_clone_and_deref(path: &Vec) -> Vec { - let mut new_path = Vec::with_capacity(path.len()+1); - new_path.clone_from(path); - new_path.push(PathElem::Deref); - new_path -} - /// Format a path fn path_format(path: &Vec) -> String { use self::PathElem::*; @@ -113,469 +107,75 @@ fn path_format(path: &Vec) -> String { let mut out = String::new(); for elem in path.iter() { match elem { - Field(name) => write!(out, ".{}", name).unwrap(), - ClosureVar(name) => write!(out, ".", name).unwrap(), - TupleElem(idx) => write!(out, ".{}", idx).unwrap(), - ArrayElem(idx) => write!(out, "[{}]", idx).unwrap(), + Field(name) => write!(out, ".{}", name), + Variant(name) => write!(out, ".", name), + ClosureVar(name) => write!(out, ".", name), + TupleElem(idx) => write!(out, ".{}", idx), + ArrayElem(idx) => write!(out, "[{}]", idx), Deref => // This does not match Rust syntax, but it is more readable for long paths -- and // some of the other items here also are not Rust syntax. Actually we can't // even use the usual syntax because we are just showing the projections, // not the root. - write!(out, ".").unwrap(), - Tag => write!(out, ".").unwrap(), - } + write!(out, "."), + Tag => write!(out, "."), + DynDowncast => write!(out, "."), + }.unwrap() } out } -fn scalar_format(value: ScalarMaybeUndef) -> String { - match value { - ScalarMaybeUndef::Undef => - "uninitialized bytes".to_owned(), - ScalarMaybeUndef::Scalar(Scalar::Ptr(_)) => - "a pointer".to_owned(), - ScalarMaybeUndef::Scalar(Scalar::Bits { bits, .. }) => - bits.to_string(), +// Test if a range that wraps at overflow contains `test` +fn wrapping_range_contains(r: &RangeInclusive, test: u128) -> bool { + let (lo, hi) = r.clone().into_inner(); + if lo > hi { + // Wrapped + (..=hi).contains(&test) || (lo..).contains(&test) + } else { + // Normal + r.contains(&test) } } -impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { - /// Make sure that `value` is valid for `ty`, *assuming* `ty` is a primitive type. - fn validate_primitive_type( - &self, - value: ValTy<'tcx, M::PointerTag>, - path: &Vec, - ref_tracking: Option<&mut RefTracking<'tcx, M::PointerTag>>, - const_mode: bool, - ) -> EvalResult<'tcx> { - // Go over all the primitive types - let ty = value.layout.ty; - match ty.sty { - ty::Bool => { - let value = value.to_scalar_or_undef(); - try_validation!(value.to_bool(), - scalar_format(value), path, "a boolean"); - }, - ty::Char => { - let value = value.to_scalar_or_undef(); - try_validation!(value.to_char(), - scalar_format(value), path, "a valid unicode codepoint"); - }, - ty::Float(_) | ty::Int(_) | ty::Uint(_) => { - // NOTE: Keep this in sync with the array optimization for int/float - // types below! - let size = value.layout.size; - let value = value.to_scalar_or_undef(); - if const_mode { - // Integers/floats in CTFE: Must be scalar bits, pointers are dangerous - try_validation!(value.to_bits(size), - scalar_format(value), path, "initialized plain bits"); - } else { - // At run-time, for now, we accept *anything* for these types, including - // undef. 
We should fix that, but let's start low. - } - } - _ if ty.is_box() || ty.is_region_ptr() || ty.is_unsafe_ptr() => { - // Handle fat pointers. We also check fat raw pointers, - // their metadata must be valid! - // This also checks that the ptr itself is initialized, which - // seems reasonable even for raw pointers. - let place = try_validation!(self.ref_to_mplace(value), - "undefined data in pointer", path); - // Check metadata early, for better diagnostics - if place.layout.is_unsized() { - let tail = self.tcx.struct_tail(place.layout.ty); - match tail.sty { - ty::Dynamic(..) => { - let vtable = try_validation!(place.meta.unwrap().to_ptr(), - "non-pointer vtable in fat pointer", path); - try_validation!(self.read_drop_type_from_vtable(vtable), - "invalid drop fn in vtable", path); - try_validation!(self.read_size_and_align_from_vtable(vtable), - "invalid size or align in vtable", path); - // FIXME: More checks for the vtable. - } - ty::Slice(..) | ty::Str => { - try_validation!(place.meta.unwrap().to_usize(self), - "non-integer slice length in fat pointer", path); - } - ty::Foreign(..) => { - // Unsized, but not fat. - } - _ => - bug!("Unexpected unsized type tail: {:?}", tail), - } - } - // for safe ptrs, also check the ptr values itself - if !ty.is_unsafe_ptr() { - // Make sure this is non-NULL and aligned - let (size, align) = self.size_and_align_of(place.meta, place.layout)? - // for the purpose of validity, consider foreign types to have - // alignment and size determined by the layout (size will be 0, - // alignment should take attributes into account). - .unwrap_or_else(|| place.layout.size_and_align()); - match self.memory.check_align(place.ptr, align) { - Ok(_) => {}, - Err(err) => match err.kind { - EvalErrorKind::InvalidNullPointerUsage => - return validation_failure!("NULL reference", path), - EvalErrorKind::AlignmentCheckFailed { .. } => - return validation_failure!("unaligned reference", path), - _ => - return validation_failure!( - "dangling (out-of-bounds) reference (might be NULL at \ - run-time)", - path - ), - } - } - // non-ZST also have to be dereferenceable - if size != Size::ZERO { - let ptr = try_validation!(place.ptr.to_ptr(), - "integer pointer in non-ZST reference", path); - if const_mode { - // Skip validation entirely for some external statics - let alloc_kind = self.tcx.alloc_map.lock().get(ptr.alloc_id); - if let Some(AllocType::Static(did)) = alloc_kind { - // `extern static` cannot be validated as they have no body. - // FIXME: Statics from other crates are also skipped. - // They might be checked at a different type, but for now we - // want to avoid recursing too deeply. This is not sound! - if !did.is_local() || self.tcx.is_foreign_item(did) { - return Ok(()); - } - } - } - try_validation!(self.memory.check_bounds(ptr, size, false), - "dangling (not entirely in bounds) reference", path); - } - if let Some(ref_tracking) = ref_tracking { - // Check if we have encountered this pointer+layout combination - // before. Proceed recursively even for integer pointers, no - // reason to skip them! They are (recursively) valid for some ZST, - // but not for others (e.g. `!` is a ZST). 
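The `wrapping_range_contains` helper introduced in this file interprets a `valid_range` whose start is greater than its end as wrapping around the end of the integer's value space. A self-contained sketch of the same check in plain Rust, independent of rustc's layout types:

```rust
use std::ops::RangeInclusive;

/// A range with `lo > hi` (e.g. `200..=10`) is treated as "0..=10 plus
/// 200..=MAX"; otherwise it is an ordinary inclusive range.
fn wrapping_range_contains(r: &RangeInclusive<u128>, test: u128) -> bool {
    let (lo, hi) = r.clone().into_inner();
    if lo > hi {
        // Wrapped around the end of the value space.
        test <= hi || test >= lo
    } else {
        r.contains(&test)
    }
}

fn main() {
    // A `NonZeroU8`-style niche excludes only 0.
    assert!(wrapping_range_contains(&(1..=255), 7));
    assert!(!wrapping_range_contains(&(1..=255), 0));
    // A wrapped range allows both ends of the space but not the middle.
    assert!(wrapping_range_contains(&(200..=10), 5));
    assert!(wrapping_range_contains(&(200..=10), 250));
    assert!(!wrapping_range_contains(&(200..=10), 100));
}
```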
- let op = place.into(); - if ref_tracking.seen.insert(op) { - trace!("Recursing below ptr {:#?}", *op); - ref_tracking.todo.push((op, path_clone_and_deref(path))); - } - } - } - } - ty::FnPtr(_sig) => { - let value = value.to_scalar_or_undef(); - let ptr = try_validation!(value.to_ptr(), - scalar_format(value), path, "a pointer"); - let _fn = try_validation!(self.memory.get_fn(ptr), - scalar_format(value), path, "a function pointer"); - // FIXME: Check if the signature matches - } - // This should be all the primitive types - ty::Never => bug!("Uninhabited type should have been caught earlier"), - _ => bug!("Unexpected primitive type {}", value.layout.ty) - } - Ok(()) - } - - /// Make sure that `value` matches the - fn validate_scalar_layout( - &self, - value: ScalarMaybeUndef, - size: Size, - path: &Vec, - layout: &layout::Scalar, - ) -> EvalResult<'tcx> { - let (lo, hi) = layout.valid_range.clone().into_inner(); - let max_hi = u128::max_value() >> (128 - size.bits()); // as big as the size fits - assert!(hi <= max_hi); - if (lo == 0 && hi == max_hi) || (hi + 1 == lo) { - // Nothing to check - return Ok(()); - } - // At least one value is excluded. Get the bits. - let value = try_validation!(value.not_undef(), - scalar_format(value), path, format!("something in the range {:?}", layout.valid_range)); - let bits = match value { - Scalar::Ptr(ptr) => { - if lo == 1 && hi == max_hi { - // only NULL is not allowed. - // We can call `check_align` to check non-NULL-ness, but have to also look - // for function pointers. - let non_null = - self.memory.check_align( - Scalar::Ptr(ptr), Align::from_bytes(1, 1).unwrap() - ).is_ok() || - self.memory.get_fn(ptr).is_ok(); - if !non_null { - // could be NULL - return validation_failure!("a potentially NULL pointer", path); - } - return Ok(()); - } else { - // Conservatively, we reject, because the pointer *could* have this - // value. - return validation_failure!( - "a pointer", - path, - format!( - "something that cannot possibly be outside the (wrapping) range {:?}", - layout.valid_range - ) - ); - } - } - Scalar::Bits { bits, size: value_size } => { - assert_eq!(value_size as u64, size.bytes()); - bits - } - }; - // Now compare. This is slightly subtle because this is a special "wrap-around" range. - use std::ops::RangeInclusive; - let in_range = |bound: RangeInclusive| bound.contains(&bits); - if lo > hi { - // wrapping around - if in_range(0..=hi) || in_range(lo..=max_hi) { - Ok(()) - } else { - validation_failure!( - bits, - path, - format!("something in the range {:?} or {:?}", 0..=hi, lo..=max_hi) - ) - } +// Formats such that a sentence like "expected something {}" to mean +// "expected something " makes sense. +fn wrapping_range_format(r: &RangeInclusive, max_hi: u128) -> String { + let (lo, hi) = r.clone().into_inner(); + debug_assert!(hi <= max_hi); + if lo > hi { + format!("less or equal to {}, or greater or equal to {}", hi, lo) + } else { + if lo == 0 { + debug_assert!(hi < max_hi, "should not be printing if the range covers everything"); + format!("less or equal to {}", hi) + } else if hi == max_hi { + format!("greater or equal to {}", lo) } else { - if in_range(layout.valid_range.clone()) { - Ok(()) - } else { - validation_failure!( - bits, - path, - if hi == max_hi { - format!("something greater or equal to {}", lo) - } else { - format!("something in the range {:?}", layout.valid_range) - } - ) - } + format!("in the range {:?}", r) } } +} - /// This function checks the data at `op`. 
`op` is assumed to cover valid memory if it - /// is an indirect operand. - /// It will error if the bits at the destination do not match the ones described by the layout. - /// The `path` may be pushed to, but the part that is present when the function - /// starts must not be changed! - /// - /// `ref_tracking` can be None to avoid recursive checking below references. - /// This also toggles between "run-time" (no recursion) and "compile-time" (with recursion) - /// validation (e.g., pointer values are fine in integers at runtime). - pub fn validate_operand( - &self, - dest: OpTy<'tcx, M::PointerTag>, - path: &mut Vec, - mut ref_tracking: Option<&mut RefTracking<'tcx, M::PointerTag>>, - const_mode: bool, - ) -> EvalResult<'tcx> { - trace!("validate_operand: {:?}, {:?}", *dest, dest.layout.ty); +struct ValidityVisitor<'rt, 'a: 'rt, 'mir: 'rt, 'tcx: 'a+'rt+'mir, M: Machine<'a, 'mir, 'tcx>+'rt> { + /// The `path` may be pushed to, but the part that is present when a function + /// starts must not be changed! `visit_fields` and `visit_array` rely on + /// this stack discipline. + path: Vec, + ref_tracking: Option<&'rt mut RefTracking<'tcx, M::PointerTag>>, + const_mode: bool, + ecx: &'rt EvalContext<'a, 'mir, 'tcx, M>, +} - // If this is a multi-variant layout, we have find the right one and proceed with that. - // (No good reasoning to make this recursion, but it is equivalent to that.) - let dest = match dest.layout.variants { - layout::Variants::NicheFilling { .. } | - layout::Variants::Tagged { .. } => { - let variant = match self.read_discriminant(dest) { - Ok(res) => res.1, - Err(err) => match err.kind { - EvalErrorKind::InvalidDiscriminant(val) => - return validation_failure!( - format!("invalid enum discriminant {}", val), path - ), - _ => - return validation_failure!( - format!("non-integer enum discriminant"), path - ), - } - }; - // Put the variant projection onto the path, as a field - path.push(PathElem::Field(dest.layout.ty - .ty_adt_def() - .unwrap() - .variants[variant].name)); - // Proceed with this variant - let dest = self.operand_downcast(dest, variant)?; - trace!("variant layout: {:#?}", dest.layout); - dest - }, - layout::Variants::Single { .. } => dest, - }; - - // First thing, find the real type: - // If it is a trait object, switch to the actual type that was used to create it. - let dest = match dest.layout.ty.sty { - ty::Dynamic(..) => { - let dest = dest.to_mem_place(); // immediate trait objects are not a thing - self.unpack_dyn_trait(dest)?.1.into() - }, - _ => dest - }; - - // If this is a scalar, validate the scalar layout. - // Things can be aggregates and have scalar layout at the same time, and that - // is very relevant for `NonNull` and similar structs: We need to validate them - // at their scalar layout *before* descending into their fields. - // FIXME: We could avoid some redundant checks here. For newtypes wrapping - // scalars, we do the same check on every "level" (e.g. first we check - // MyNewtype and then the scalar in there). - match dest.layout.abi { - layout::Abi::Uninhabited => - return validation_failure!("a value of an uninhabited type", path), - layout::Abi::Scalar(ref layout) => { - let value = try_validation!(self.read_scalar(dest), - "uninitialized or unrepresentable data", path); - self.validate_scalar_layout(value, dest.layout.size, &path, layout)?; - } - // FIXME: Should we do something for ScalarPair? Vector? - _ => {} - } - - // Check primitive types. We do this after checking the scalar layout, - // just to have that done as well. 
Primitives can have varying layout, - // so we check them separately and before aggregate handling. - // It is CRITICAL that we get this check right, or we might be - // validating the wrong thing! - let primitive = match dest.layout.fields { - // Primitives appear as Union with 0 fields -- except for fat pointers. - layout::FieldPlacement::Union(0) => true, - _ => dest.layout.ty.builtin_deref(true).is_some(), - }; - if primitive { - let value = try_validation!(self.read_value(dest), - "uninitialized or unrepresentable data", path); - return self.validate_primitive_type( - value, - &path, - ref_tracking, - const_mode, - ); - } - - // Validate all fields of compound data structures - let path_len = path.len(); // Remember the length, in case we need to truncate - match dest.layout.fields { - layout::FieldPlacement::Union(..) => { - // We can't check unions, their bits are allowed to be anything. - // The fields don't need to correspond to any bit pattern of the union's fields. - // See https://github.com/rust-lang/rust/issues/32836#issuecomment-406875389 - }, - layout::FieldPlacement::Arbitrary { ref offsets, .. } => { - // Go look at all the fields - for i in 0..offsets.len() { - let field = self.operand_field(dest, i as u64)?; - path.push(self.aggregate_field_path_elem(dest.layout, i)); - self.validate_operand( - field, - path, - ref_tracking.as_mut().map(|r| &mut **r), - const_mode, - )?; - path.truncate(path_len); - } - } - layout::FieldPlacement::Array { stride, .. } => { - let dest = if dest.layout.is_zst() { - // it's a ZST, the memory content cannot matter - MPlaceTy::dangling(dest.layout, self) - } else { - // non-ZST array/slice/str cannot be immediate - dest.to_mem_place() - }; - match dest.layout.ty.sty { - // Special handling for strings to verify UTF-8 - ty::Str => { - try_validation!(self.read_str(dest), - "uninitialized or non-UTF-8 data in str", path); - } - // Special handling for arrays/slices of builtin integer types - ty::Array(tys, ..) | ty::Slice(tys) if { - // This optimization applies only for integer and floating point types - // (i.e., types that can hold arbitrary bytes). - match tys.sty { - ty::Int(..) | ty::Uint(..) | ty::Float(..) => true, - _ => false, - } - } => { - // This is the length of the array/slice. - let len = dest.len(self)?; - // Since primitive types are naturally aligned and tightly packed in arrays, - // we can use the stride to get the size of the integral type. - let ty_size = stride.bytes(); - // This is the size in bytes of the whole array. - let size = Size::from_bytes(ty_size * len); - - // NOTE: Keep this in sync with the handling of integer and float - // types above, in `validate_primitive_type`. - // In run-time mode, we accept pointers in here. This is actually more - // permissive than a per-element check would be, e.g. we accept - // an &[u8] that contains a pointer even though bytewise checking would - // reject it. However, that's good: We don't inherently want - // to reject those pointers, we just do not have the machinery to - // talk about parts of a pointer. - // We also accept undef, for consistency with the type-based checks. - match self.memory.check_bytes( - dest.ptr, - size, - /*allow_ptr_and_undef*/!const_mode, - ) { - // In the happy case, we needn't check anything else. - Ok(()) => {}, - // Some error happened, try to provide a more detailed description. 
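In the bulk byte check used for int/float arrays, a `ReadUndefBytes` error only reports the byte offset of the first undefined byte; the array index pushed onto the diagnostic path is recovered by dividing by the element size. The arithmetic in isolation:

```rust
/// Given the offset of the first undefined byte in a tightly packed array of
/// elements of `elem_size_bytes`, return the index of the offending element.
fn undef_element_index(offset_bytes: u64, elem_size_bytes: u64) -> usize {
    (offset_bytes / elem_size_bytes) as usize
}

fn main() {
    // e.g. a `[u32; 8]`: an undefined byte at offset 13 lies in element 3.
    assert_eq!(undef_element_index(13, 4), 3);
}
```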
- Err(err) => { - // For some errors we might be able to provide extra information - match err.kind { - EvalErrorKind::ReadUndefBytes(offset) => { - // Some byte was undefined, determine which - // element that byte belongs to so we can - // provide an index. - let i = (offset.bytes() / ty_size) as usize; - path.push(PathElem::ArrayElem(i)); - - return validation_failure!( - "undefined bytes", path - ) - }, - // Other errors shouldn't be possible - _ => return Err(err), - } - } - } - }, - _ => { - // This handles the unsized case correctly as well, as well as - // SIMD an all sorts of other array-like types. - for (i, field) in self.mplace_array_fields(dest)?.enumerate() { - let field = field?; - path.push(PathElem::ArrayElem(i)); - self.validate_operand( - field.into(), - path, - ref_tracking.as_mut().map(|r| &mut **r), - const_mode, - )?; - path.truncate(path_len); - } - } - } - }, - } - Ok(()) - } - - fn aggregate_field_path_elem(&self, layout: TyLayout<'tcx>, field: usize) -> PathElem { +impl<'rt, 'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> ValidityVisitor<'rt, 'a, 'mir, 'tcx, M> { + fn aggregate_field_path_elem( + &mut self, + layout: TyLayout<'tcx>, + field: usize, + ) -> PathElem { match layout.ty.sty { // generators and closures. ty::Closure(def_id, _) | ty::Generator(def_id, _, _) => { - if let Some(upvar) = self.tcx.optimized_mir(def_id).upvar_decls.get(field) { + if let Some(upvar) = self.ecx.tcx.optimized_mir(def_id).upvar_decls.get(field) { PathElem::ClosureVar(upvar.debug_name) } else { // Sometimes the index is beyond the number of freevars (seen @@ -589,18 +189,434 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> // enums ty::Adt(def, ..) if def.is_enum() => { - let variant = match layout.variants { - layout::Variants::Single { index } => &def.variants[index], - _ => bug!("aggregate_field_path_elem: got enum but not in a specific variant"), - }; - PathElem::Field(variant.fields[field].ident.name) + // we might be projecting *to* a variant, or to a field *in*a variant. + match layout.variants { + layout::Variants::Single { index } => + // Inside a variant + PathElem::Field(def.variants[index].fields[field].ident.name), + _ => bug!(), + } } // other ADTs ty::Adt(def, _) => PathElem::Field(def.non_enum_variant().fields[field].ident.name), + // arrays/slices + ty::Array(..) | ty::Slice(..) => PathElem::ArrayElem(field), + + // dyn traits + ty::Dynamic(..) 
=> PathElem::DynDowncast, + // nothing else has an aggregate layout _ => bug!("aggregate_field_path_elem: got non-aggregate type {:?}", layout.ty), } } + + fn visit_elem( + &mut self, + new_op: OpTy<'tcx, M::PointerTag>, + elem: PathElem, + ) -> EvalResult<'tcx> { + // Remember the old state + let path_len = self.path.len(); + // Perform operation + self.path.push(elem); + self.visit_value(new_op)?; + // Undo changes + self.path.truncate(path_len); + Ok(()) + } +} + +impl<'rt, 'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> + ValueVisitor<'a, 'mir, 'tcx, M> for ValidityVisitor<'rt, 'a, 'mir, 'tcx, M> +{ + type V = OpTy<'tcx, M::PointerTag>; + + #[inline(always)] + fn ecx(&self) -> &EvalContext<'a, 'mir, 'tcx, M> { + &self.ecx + } + + #[inline] + fn visit_field( + &mut self, + old_op: OpTy<'tcx, M::PointerTag>, + field: usize, + new_op: OpTy<'tcx, M::PointerTag> + ) -> EvalResult<'tcx> { + let elem = self.aggregate_field_path_elem(old_op.layout, field); + self.visit_elem(new_op, elem) + } + + #[inline] + fn visit_variant( + &mut self, + old_op: OpTy<'tcx, M::PointerTag>, + variant_id: VariantIdx, + new_op: OpTy<'tcx, M::PointerTag> + ) -> EvalResult<'tcx> { + let name = old_op.layout.ty.ty_adt_def().unwrap().variants[variant_id].name; + self.visit_elem(new_op, PathElem::Variant(name)) + } + + #[inline] + fn visit_value(&mut self, op: OpTy<'tcx, M::PointerTag>) -> EvalResult<'tcx> + { + trace!("visit_value: {:?}, {:?}", *op, op.layout); + // Translate some possible errors to something nicer. + match self.walk_value(op) { + Ok(()) => Ok(()), + Err(err) => match err.kind { + EvalErrorKind::InvalidDiscriminant(val) => + validation_failure!( + val, self.path, "a valid enum discriminant" + ), + EvalErrorKind::ReadPointerAsBytes => + validation_failure!( + "a pointer", self.path, "plain (non-pointer) bytes" + ), + _ => Err(err), + } + } + } + + fn visit_primitive(&mut self, value: OpTy<'tcx, M::PointerTag>) -> EvalResult<'tcx> + { + let value = self.ecx.read_immediate(value)?; + // Go over all the primitive types + let ty = value.layout.ty; + match ty.sty { + ty::Bool => { + let value = value.to_scalar_or_undef(); + try_validation!(value.to_bool(), + value, self.path, "a boolean"); + }, + ty::Char => { + let value = value.to_scalar_or_undef(); + try_validation!(value.to_char(), + value, self.path, "a valid unicode codepoint"); + }, + ty::Float(_) | ty::Int(_) | ty::Uint(_) => { + // NOTE: Keep this in sync with the array optimization for int/float + // types below! + let size = value.layout.size; + let value = value.to_scalar_or_undef(); + if self.const_mode { + // Integers/floats in CTFE: Must be scalar bits, pointers are dangerous + try_validation!(value.to_bits(size), + value, self.path, "initialized plain (non-pointer) bytes"); + } else { + // At run-time, for now, we accept *anything* for these types, including + // undef. We should fix that, but let's start low. + } + } + ty::RawPtr(..) => { + if self.const_mode { + // Integers/floats in CTFE: For consistency with integers, we do not + // accept undef. + let _ptr = try_validation!(value.to_scalar_ptr(), + "undefined address in raw pointer", self.path); + let _meta = try_validation!(value.to_meta(), + "uninitialized data in raw fat pointer metadata", self.path); + } else { + // Remain consistent with `usize`: Accept anything. + } + } + _ if ty.is_box() || ty.is_region_ptr() => { + // Handle fat pointers. 
+ // Check metadata early, for better diagnostics + let ptr = try_validation!(value.to_scalar_ptr(), + "undefined address in pointer", self.path); + let meta = try_validation!(value.to_meta(), + "uninitialized data in fat pointer metadata", self.path); + let layout = self.ecx.layout_of(value.layout.ty.builtin_deref(true).unwrap().ty)?; + if layout.is_unsized() { + let tail = self.ecx.tcx.struct_tail(layout.ty); + match tail.sty { + ty::Dynamic(..) => { + let vtable = try_validation!(meta.unwrap().to_ptr(), + "non-pointer vtable in fat pointer", self.path); + try_validation!(self.ecx.read_drop_type_from_vtable(vtable), + "invalid drop fn in vtable", self.path); + try_validation!(self.ecx.read_size_and_align_from_vtable(vtable), + "invalid size or align in vtable", self.path); + // FIXME: More checks for the vtable. + } + ty::Slice(..) | ty::Str => { + try_validation!(meta.unwrap().to_usize(self.ecx), + "non-integer slice length in fat pointer", self.path); + } + ty::Foreign(..) => { + // Unsized, but not fat. + } + _ => + bug!("Unexpected unsized type tail: {:?}", tail), + } + } + // Make sure this is non-NULL and aligned + let (size, align) = self.ecx.size_and_align_of(meta, layout)? + // for the purpose of validity, consider foreign types to have + // alignment and size determined by the layout (size will be 0, + // alignment should take attributes into account). + .unwrap_or_else(|| (layout.size, layout.align.abi)); + match self.ecx.memory.check_align(ptr, align) { + Ok(_) => {}, + Err(err) => { + error!("{:?} is not aligned to {:?}", ptr, align); + match err.kind { + EvalErrorKind::InvalidNullPointerUsage => + return validation_failure!("NULL reference", self.path), + EvalErrorKind::AlignmentCheckFailed { .. } => + return validation_failure!("unaligned reference", self.path), + _ => + return validation_failure!( + "dangling (out-of-bounds) reference (might be NULL at \ + run-time)", + self.path + ), + } + } + } + // Recursive checking + if let Some(ref mut ref_tracking) = self.ref_tracking { + assert!(self.const_mode, "We should only do recursie checking in const mode"); + let place = self.ecx.ref_to_mplace(value)?; + if size != Size::ZERO { + // Non-ZST also have to be dereferencable + let ptr = try_validation!(place.ptr.to_ptr(), + "integer pointer in non-ZST reference", self.path); + // Skip validation entirely for some external statics + let alloc_kind = self.ecx.tcx.alloc_map.lock().get(ptr.alloc_id); + if let Some(AllocType::Static(did)) = alloc_kind { + // `extern static` cannot be validated as they have no body. + // FIXME: Statics from other crates are also skipped. + // They might be checked at a different type, but for now we + // want to avoid recursing too deeply. This is not sound! + if !did.is_local() || self.ecx.tcx.is_foreign_item(did) { + return Ok(()); + } + } + // Maintain the invariant that the place we are checking is + // already verified to be in-bounds. + try_validation!( + self.ecx.memory + .get(ptr.alloc_id)? + .check_bounds(self.ecx, ptr, size), + "dangling (not entirely in bounds) reference", self.path); + } + // Check if we have encountered this pointer+layout combination + // before. Proceed recursively even for integer pointers, no + // reason to skip them! They are (recursively) valid for some ZST, + // but not for others (e.g. `!` is a ZST). 
+ let op = place.into(); + if ref_tracking.seen.insert(op) { + trace!("Recursing below ptr {:#?}", *op); + // We need to clone the path anyway, make sure it gets created + // with enough space for the additional `Deref`. + let mut new_path = Vec::with_capacity(self.path.len()+1); + new_path.clone_from(&self.path); + new_path.push(PathElem::Deref); + // Remember to come back to this later. + ref_tracking.todo.push((op, new_path)); + } + } + } + ty::FnPtr(_sig) => { + let value = value.to_scalar_or_undef(); + let ptr = try_validation!(value.to_ptr(), + value, self.path, "a pointer"); + let _fn = try_validation!(self.ecx.memory.get_fn(ptr), + value, self.path, "a function pointer"); + // FIXME: Check if the signature matches + } + // This should be all the primitive types + _ => bug!("Unexpected primitive type {}", value.layout.ty) + } + Ok(()) + } + + fn visit_uninhabited(&mut self) -> EvalResult<'tcx> + { + validation_failure!("a value of an uninhabited type", self.path) + } + + fn visit_scalar( + &mut self, + op: OpTy<'tcx, M::PointerTag>, + layout: &layout::Scalar, + ) -> EvalResult<'tcx> { + let value = self.ecx.read_scalar(op)?; + // Determine the allowed range + let (lo, hi) = layout.valid_range.clone().into_inner(); + // `max_hi` is as big as the size fits + let max_hi = u128::max_value() >> (128 - op.layout.size.bits()); + assert!(hi <= max_hi); + // We could also write `(hi + 1) % (max_hi + 1) == lo` but `max_hi + 1` overflows for `u128` + if (lo == 0 && hi == max_hi) || (hi + 1 == lo) { + // Nothing to check + return Ok(()); + } + // At least one value is excluded. Get the bits. + let value = try_validation!(value.not_undef(), + value, self.path, + format!("something in the range {:?}", layout.valid_range)); + let bits = match value { + Scalar::Ptr(ptr) => { + if lo == 1 && hi == max_hi { + // only NULL is not allowed. + // We can call `check_align` to check non-NULL-ness, but have to also look + // for function pointers. + let non_null = + self.ecx.memory.check_align( + Scalar::Ptr(ptr), Align::from_bytes(1).unwrap() + ).is_ok() || + self.ecx.memory.get_fn(ptr).is_ok(); + if !non_null { + // could be NULL + return validation_failure!("a potentially NULL pointer", self.path); + } + return Ok(()); + } else { + // Conservatively, we reject, because the pointer *could* have this + // value. + return validation_failure!( + "a pointer", + self.path, + format!( + "something that cannot possibly fail to be {}", + wrapping_range_format(&layout.valid_range, max_hi) + ) + ); + } + } + Scalar::Bits { bits, size } => { + assert_eq!(size as u64, op.layout.size.bytes()); + bits + } + }; + // Now compare. This is slightly subtle because this is a special "wrap-around" range. + if wrapping_range_contains(&layout.valid_range, bits) { + Ok(()) + } else { + validation_failure!( + bits, + self.path, + format!("something {}", wrapping_range_format(&layout.valid_range, max_hi)) + ) + } + } + + fn visit_aggregate( + &mut self, + op: OpTy<'tcx, M::PointerTag>, + fields: impl Iterator>, + ) -> EvalResult<'tcx> { + match op.layout.ty.sty { + ty::Str => { + let mplace = op.to_mem_place(); // strings are never immediate + try_validation!(self.ecx.read_str(mplace), + "uninitialized or non-UTF-8 data in str", self.path); + } + ty::Array(tys, ..) | ty::Slice(tys) if { + // This optimization applies only for integer and floating point types + // (i.e., types that can hold arbitrary bytes). + match tys.sty { + ty::Int(..) | ty::Uint(..) | ty::Float(..) 
=> true, + _ => false, + } + } => { + // bailing out for zsts is ok, since the array element type can only be int/float + if op.layout.is_zst() { + return Ok(()); + } + // non-ZST array cannot be immediate, slices are never immediate + let mplace = op.to_mem_place(); + // This is the length of the array/slice. + let len = mplace.len(self.ecx)?; + // zero length slices have nothing to be checked + if len == 0 { + return Ok(()); + } + // This is the element type size. + let ty_size = self.ecx.layout_of(tys)?.size; + // This is the size in bytes of the whole array. + let size = ty_size * len; + + let ptr = mplace.ptr.to_ptr()?; + + // NOTE: Keep this in sync with the handling of integer and float + // types above, in `visit_primitive`. + // In run-time mode, we accept pointers in here. This is actually more + // permissive than a per-element check would be, e.g. we accept + // an &[u8] that contains a pointer even though bytewise checking would + // reject it. However, that's good: We don't inherently want + // to reject those pointers, we just do not have the machinery to + // talk about parts of a pointer. + // We also accept undef, for consistency with the type-based checks. + match self.ecx.memory.get(ptr.alloc_id)?.check_bytes( + self.ecx, + ptr, + size, + /*allow_ptr_and_undef*/!self.const_mode, + ) { + // In the happy case, we needn't check anything else. + Ok(()) => {}, + // Some error happened, try to provide a more detailed description. + Err(err) => { + // For some errors we might be able to provide extra information + match err.kind { + EvalErrorKind::ReadUndefBytes(offset) => { + // Some byte was undefined, determine which + // element that byte belongs to so we can + // provide an index. + let i = (offset.bytes() / ty_size.bytes()) as usize; + self.path.push(PathElem::ArrayElem(i)); + + return validation_failure!( + "undefined bytes", self.path + ) + }, + // Other errors shouldn't be possible + _ => return Err(err), + } + } + } + } + _ => { + self.walk_aggregate(op, fields)? // default handler + } + } + Ok(()) + } +} + +impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { + /// This function checks the data at `op`. `op` is assumed to cover valid memory if it + /// is an indirect operand. + /// It will error if the bits at the destination do not match the ones described by the layout. + /// + /// `ref_tracking` can be None to avoid recursive checking below references. + /// This also toggles between "run-time" (no recursion) and "compile-time" (with recursion) + /// validation (e.g., pointer values are fine in integers at runtime). + pub fn validate_operand( + &self, + op: OpTy<'tcx, M::PointerTag>, + path: Vec, + ref_tracking: Option<&mut RefTracking<'tcx, M::PointerTag>>, + const_mode: bool, + ) -> EvalResult<'tcx> { + trace!("validate_operand: {:?}, {:?}", *op, op.layout.ty); + + // Construct a visitor + let mut visitor = ValidityVisitor { + path, + ref_tracking, + const_mode, + ecx: self, + }; + + // Run it + visitor.visit_value(op) + } } diff --git a/src/librustc_mir/interpret/visitor.rs b/src/librustc_mir/interpret/visitor.rs new file mode 100644 index 0000000000..81e56f3115 --- /dev/null +++ b/src/librustc_mir/interpret/visitor.rs @@ -0,0 +1,360 @@ +//! Visitor for a run-time value with a given layout: Traverse enums, structs and other compound +//! types until we arrive at the leaves, with custom handling for primitive types. 
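The `ValueVisitor`/`MutValueVisitor` traits defined below follow the classic "overridable hook plus default walk" structure: `visit_*` methods dispatch and may be overridden, while `walk_*` methods perform the actual recursion. A toy version of that structure over an ordinary tree, just to show the shape (none of these names are rustc's):

```rust
enum Node {
    Leaf(u64),
    Aggregate(Vec<Node>),
}

trait NodeVisitor {
    /// Leaf hook: override this to do real work.
    fn visit_leaf(&mut self, _v: u64) {}

    /// Dispatch hook: defaults to just walking.
    fn visit_node(&mut self, n: &Node) {
        self.walk_node(n)
    }

    /// Default recursion, not meant to be overridden.
    fn walk_node(&mut self, n: &Node) {
        match n {
            Node::Leaf(v) => self.visit_leaf(*v),
            Node::Aggregate(fields) => {
                for f in fields {
                    self.visit_node(f);
                }
            }
        }
    }
}

struct LeafCounter(usize);
impl NodeVisitor for LeafCounter {
    fn visit_leaf(&mut self, _v: u64) {
        self.0 += 1;
    }
}

fn main() {
    let tree = Node::Aggregate(vec![Node::Leaf(1), Node::Aggregate(vec![Node::Leaf(2)])]);
    let mut c = LeafCounter(0);
    c.visit_node(&tree);
    assert_eq!(c.0, 2);
}
```

In the real trait, `visit_value` defaults to `walk_value`, and the new `ValidityVisitor` overrides the leaf and path-tracking hooks (`visit_primitive`, `visit_scalar`, `visit_aggregate`, `visit_field`, `visit_variant`) while reusing the default walkers for recursion.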
+ +use rustc::ty::layout::{self, TyLayout, VariantIdx}; +use rustc::ty; +use rustc::mir::interpret::{ + EvalResult, +}; + +use super::{ + Machine, EvalContext, MPlaceTy, OpTy, +}; + +// A thing that we can project into, and that has a layout. +// This wouldn't have to depend on `Machine` but with the current type inference, +// that's just more convenient to work with (avoids repeating all the `Machine` bounds). +pub trait Value<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>>: Copy +{ + /// Get this value's layout. + fn layout(&self) -> TyLayout<'tcx>; + + /// Make this into an `OpTy`. + fn to_op( + self, + ecx: &EvalContext<'a, 'mir, 'tcx, M>, + ) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>>; + + /// Create this from an `MPlaceTy`. + fn from_mem_place(MPlaceTy<'tcx, M::PointerTag>) -> Self; + + /// Project to the given enum variant. + fn project_downcast( + self, + ecx: &EvalContext<'a, 'mir, 'tcx, M>, + variant: VariantIdx, + ) -> EvalResult<'tcx, Self>; + + /// Project to the n-th field. + fn project_field( + self, + ecx: &EvalContext<'a, 'mir, 'tcx, M>, + field: u64, + ) -> EvalResult<'tcx, Self>; +} + +// Operands and memory-places are both values. +// Places in general are not due to `place_field` having to do `force_allocation`. +impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Value<'a, 'mir, 'tcx, M> + for OpTy<'tcx, M::PointerTag> +{ + #[inline(always)] + fn layout(&self) -> TyLayout<'tcx> { + self.layout + } + + #[inline(always)] + fn to_op( + self, + _ecx: &EvalContext<'a, 'mir, 'tcx, M>, + ) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>> { + Ok(self) + } + + #[inline(always)] + fn from_mem_place(mplace: MPlaceTy<'tcx, M::PointerTag>) -> Self { + mplace.into() + } + + #[inline(always)] + fn project_downcast( + self, + ecx: &EvalContext<'a, 'mir, 'tcx, M>, + variant: VariantIdx, + ) -> EvalResult<'tcx, Self> { + ecx.operand_downcast(self, variant) + } + + #[inline(always)] + fn project_field( + self, + ecx: &EvalContext<'a, 'mir, 'tcx, M>, + field: u64, + ) -> EvalResult<'tcx, Self> { + ecx.operand_field(self, field) + } +} +impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Value<'a, 'mir, 'tcx, M> + for MPlaceTy<'tcx, M::PointerTag> +{ + #[inline(always)] + fn layout(&self) -> TyLayout<'tcx> { + self.layout + } + + #[inline(always)] + fn to_op( + self, + _ecx: &EvalContext<'a, 'mir, 'tcx, M>, + ) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>> { + Ok(self.into()) + } + + #[inline(always)] + fn from_mem_place(mplace: MPlaceTy<'tcx, M::PointerTag>) -> Self { + mplace + } + + #[inline(always)] + fn project_downcast( + self, + ecx: &EvalContext<'a, 'mir, 'tcx, M>, + variant: VariantIdx, + ) -> EvalResult<'tcx, Self> { + ecx.mplace_downcast(self, variant) + } + + #[inline(always)] + fn project_field( + self, + ecx: &EvalContext<'a, 'mir, 'tcx, M>, + field: u64, + ) -> EvalResult<'tcx, Self> { + ecx.mplace_field(self, field) + } +} + +macro_rules! make_value_visitor { + ($visitor_trait_name:ident, $($mutability:ident)*) => { + // How to traverse a value and what to do when we are at the leaves. + pub trait $visitor_trait_name<'a, 'mir, 'tcx: 'mir+'a, M: Machine<'a, 'mir, 'tcx>>: Sized { + type V: Value<'a, 'mir, 'tcx, M>; + + /// The visitor must have an `EvalContext` in it. + fn ecx(&$($mutability)* self) + -> &$($mutability)* EvalContext<'a, 'mir, 'tcx, M>; + + // Recursive actions, ready to be overloaded. + /// Visit the given value, dispatching as appropriate to more specialized visitors. 
+ #[inline(always)] + fn visit_value(&mut self, v: Self::V) -> EvalResult<'tcx> + { + self.walk_value(v) + } + /// Visit the given value as a union. No automatic recursion can happen here. + #[inline(always)] + fn visit_union(&mut self, _v: Self::V) -> EvalResult<'tcx> + { + Ok(()) + } + /// Visit this vale as an aggregate, you are even getting an iterator yielding + /// all the fields (still in an `EvalResult`, you have to do error handling yourself). + /// Recurses into the fields. + #[inline(always)] + fn visit_aggregate( + &mut self, + v: Self::V, + fields: impl Iterator>, + ) -> EvalResult<'tcx> { + self.walk_aggregate(v, fields) + } + + /// Called each time we recurse down to a field of a "product-like" aggregate + /// (structs, tuples, arrays and the like, but not enums), passing in old and new value. + /// This gives the visitor the chance to track the stack of nested fields that + /// we are descending through. + #[inline(always)] + fn visit_field( + &mut self, + _old_val: Self::V, + _field: usize, + new_val: Self::V, + ) -> EvalResult<'tcx> { + self.visit_value(new_val) + } + + /// Called for recursing into the field of a generator. These are not known to be + /// initialized, so we treat them like unions. + #[inline(always)] + fn visit_generator_field( + &mut self, + _old_val: Self::V, + _field: usize, + new_val: Self::V, + ) -> EvalResult<'tcx> { + self.visit_union(new_val) + } + + /// Called when recursing into an enum variant. + #[inline(always)] + fn visit_variant( + &mut self, + _old_val: Self::V, + _variant: VariantIdx, + new_val: Self::V, + ) -> EvalResult<'tcx> { + self.visit_value(new_val) + } + + /// Called whenever we reach a value with uninhabited layout. + /// Recursing to fields will *always* continue after this! This is not meant to control + /// whether and how we descend recursively/ into the scalar's fields if there are any, + /// it is meant to provide the chance for additional checks when a value of uninhabited + /// layout is detected. + #[inline(always)] + fn visit_uninhabited(&mut self) -> EvalResult<'tcx> + { Ok(()) } + /// Called whenever we reach a value with scalar layout. + /// We do NOT provide a `ScalarMaybeUndef` here to avoid accessing memory if the + /// visitor is not even interested in scalars. + /// Recursing to fields will *always* continue after this! This is not meant to control + /// whether and how we descend recursively/ into the scalar's fields if there are any, + /// it is meant to provide the chance for additional checks when a value of scalar + /// layout is detected. + #[inline(always)] + fn visit_scalar(&mut self, _v: Self::V, _layout: &layout::Scalar) -> EvalResult<'tcx> + { Ok(()) } + + /// Called whenever we reach a value of primitive type. There can be no recursion + /// below such a value. This is the leaf function. + /// We do *not* provide an `ImmTy` here because some implementations might want + /// to write to the place this primitive lives in. + #[inline(always)] + fn visit_primitive(&mut self, _v: Self::V) -> EvalResult<'tcx> + { Ok(()) } + + // Default recursors. Not meant to be overloaded. + fn walk_aggregate( + &mut self, + v: Self::V, + fields: impl Iterator>, + ) -> EvalResult<'tcx> { + // Now iterate over it. + for (idx, field_val) in fields.enumerate() { + self.visit_field(v, idx, field_val?)?; + } + Ok(()) + } + fn walk_value(&mut self, v: Self::V) -> EvalResult<'tcx> + { + trace!("walk_value: type: {}", v.layout().ty); + // If this is a multi-variant layout, we have find the right one and proceed with + // that. 
+ match v.layout().variants { + layout::Variants::NicheFilling { .. } | + layout::Variants::Tagged { .. } => { + let op = v.to_op(self.ecx())?; + let idx = self.ecx().read_discriminant(op)?.1; + let inner = v.project_downcast(self.ecx(), idx)?; + trace!("walk_value: variant layout: {:#?}", inner.layout()); + // recurse with the inner type + return self.visit_variant(v, idx, inner); + } + layout::Variants::Single { .. } => {} + } + + // Even for single variants, we might be able to get a more refined type: + // If it is a trait object, switch to the actual type that was used to create it. + match v.layout().ty.sty { + ty::Dynamic(..) => { + // immediate trait objects are not a thing + let dest = v.to_op(self.ecx())?.to_mem_place(); + let inner = self.ecx().unpack_dyn_trait(dest)?.1; + trace!("walk_value: dyn object layout: {:#?}", inner.layout); + // recurse with the inner type + return self.visit_field(v, 0, Value::from_mem_place(inner)); + }, + _ => {}, + }; + + // If this is a scalar, visit it as such. + // Things can be aggregates and have scalar layout at the same time, and that + // is very relevant for `NonNull` and similar structs: We need to visit them + // at their scalar layout *before* descending into their fields. + // FIXME: We could avoid some redundant checks here. For newtypes wrapping + // scalars, we do the same check on every "level" (e.g. first we check + // MyNewtype and then the scalar in there). + match v.layout().abi { + layout::Abi::Uninhabited => { + self.visit_uninhabited()?; + } + layout::Abi::Scalar(ref layout) => { + self.visit_scalar(v, layout)?; + } + // FIXME: Should we do something for ScalarPair? Vector? + _ => {} + } + + // Check primitive types. We do this after checking the scalar layout, + // just to have that done as well. Primitives can have varying layout, + // so we check them separately and before aggregate handling. + // It is CRITICAL that we get this check right, or we might be + // validating the wrong thing! + let primitive = match v.layout().fields { + // Primitives appear as Union with 0 fields - except for Boxes and fat pointers. + layout::FieldPlacement::Union(0) => true, + _ => v.layout().ty.builtin_deref(true).is_some(), + }; + if primitive { + return self.visit_primitive(v); + } + + // Proceed into the fields. + match v.layout().fields { + layout::FieldPlacement::Union(fields) => { + // Empty unions are not accepted by rustc. That's great, it means we can + // use that as an unambiguous signal for detecting primitives. Make sure + // we did not miss any primitive. + debug_assert!(fields > 0); + self.visit_union(v) + }, + layout::FieldPlacement::Arbitrary { ref offsets, .. } => { + // Special handling needed for generators: All but the first field + // (which is the state) are actually implicitly `MaybeUninit`, i.e., + // they may or may not be initialized, so we cannot visit them. + match v.layout().ty.sty { + ty::Generator(..) => { + let field = v.project_field(self.ecx(), 0)?; + self.visit_aggregate(v, std::iter::once(Ok(field)))?; + for i in 1..offsets.len() { + let field = v.project_field(self.ecx(), i as u64)?; + self.visit_generator_field(v, i, field)?; + } + Ok(()) + } + _ => { + // FIXME: We collect in a vec because otherwise there are lifetime + // errors: Projecting to a field needs access to `ecx`. + let fields: Vec> = + (0..offsets.len()).map(|i| { + v.project_field(self.ecx(), i as u64) + }) + .collect(); + self.visit_aggregate(v, fields.into_iter()) + } + } + }, + layout::FieldPlacement::Array { .. 
} => { + // Let's get an mplace first. + let mplace = if v.layout().is_zst() { + // it's a ZST, the memory content cannot matter + MPlaceTy::dangling(v.layout(), self.ecx()) + } else { + // non-ZST array/slice/str cannot be immediate + v.to_op(self.ecx())?.to_mem_place() + }; + // Now we can go over all the fields. + let iter = self.ecx().mplace_array_fields(mplace)? + .map(|f| f.and_then(|f| { + Ok(Value::from_mem_place(f)) + })); + self.visit_aggregate(v, iter) + } + } + } + } + } +} + +make_value_visitor!(ValueVisitor,); +make_value_visitor!(MutValueVisitor,mut); diff --git a/src/librustc_mir/lib.rs b/src/librustc_mir/lib.rs index 75417982aa..1a35f4da20 100644 --- a/src/librustc_mir/lib.rs +++ b/src/librustc_mir/lib.rs @@ -16,7 +16,6 @@ Rust MIR: a lowered representation of Rust. Also: an experiment! #![feature(nll)] #![feature(in_band_lifetimes)] -#![cfg_attr(stage0, feature(impl_header_lifetime_elision))] #![feature(slice_patterns)] #![feature(slice_sort_by_cached_key)] #![feature(box_patterns)] @@ -94,7 +93,9 @@ pub fn provide(providers: &mut Providers) { borrow_check::provide(providers); shim::provide(providers); transform::provide(providers); + monomorphize::partitioning::provide(providers); providers.const_eval = const_eval::const_eval_provider; + providers.const_eval_raw = const_eval::const_eval_raw_provider; providers.check_match = hair::pattern::check_match; } diff --git a/src/librustc_mir/monomorphize/collector.rs b/src/librustc_mir/monomorphize/collector.rs index 6b60b5340e..7531f62fda 100644 --- a/src/librustc_mir/monomorphize/collector.rs +++ b/src/librustc_mir/monomorphize/collector.rs @@ -178,10 +178,6 @@ //! Some things are not yet fully implemented in the current version of this //! module. //! -//! ### Initializers of Constants and Statics -//! Since no MIR is constructed yet for initializer expressions of constants and -//! statics we cannot inspect these properly. -//! //! ### Const Fns //! Ideally, no mono item should be generated for const fns unless there //! is a call to them that cannot be evaluated at compile time. At the moment @@ -191,7 +187,6 @@ use rustc::hir::{self, CodegenFnAttrFlags}; use rustc::hir::itemlikevisit::ItemLikeVisitor; -use rustc::hir::Node; use rustc::hir::def_id::DefId; use rustc::mir::interpret::{AllocId, ConstValue}; use rustc::middle::lang_items::{ExchangeMallocFnLangItem, StartFnLangItem}; @@ -202,7 +197,7 @@ use rustc::session::config; use rustc::mir::{self, Location, Promoted}; use rustc::mir::visit::Visitor as MirVisitor; use rustc::mir::mono::MonoItem; -use rustc::mir::interpret::{Scalar, GlobalId, AllocType}; +use rustc::mir::interpret::{Scalar, GlobalId, AllocType, ErrorHandled}; use monomorphize::{self, Instance}; use rustc::util::nodemap::{FxHashSet, FxHashMap, DefIdMap}; @@ -314,7 +309,7 @@ pub fn collect_crate_mono_items<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, time(tcx.sess, "collecting mono items", || { par_iter(roots).for_each(|root| { - let mut recursion_depths = DefIdMap(); + let mut recursion_depths = DefIdMap::default(); collect_items_rec(tcx, root, visited, @@ -705,6 +700,7 @@ fn visit_instance_use<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, bug!("intrinsic {:?} being reified", def_id); } } + ty::InstanceDef::VtableShim(..) | ty::InstanceDef::Virtual(..) | ty::InstanceDef::DropGlue(_, None) => { // don't need to emit shim if we are calling directly. 
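The `ValueVisitor`/`MutValueVisitor` traits generated by `make_value_visitor!` above are generic over the interpreter's machine and value types. As a rough, self-contained illustration of the protocol (overridable `visit_*` hooks plus `walk_*` default recursors), here is a toy model; the `ToyValue` enum, `ToyVisitor` trait and `CountPrimitives` visitor are invented for this sketch and are not the interpreter's types:

```rust
// Toy model of the visitor protocol: hooks with defaults, plus a default
// recursor that dispatches on the shape of the value.
#[derive(Debug)]
enum ToyValue {
    Primitive(i64),
    Aggregate(Vec<ToyValue>),
}

trait ToyVisitor {
    // Hook with a default, like `visit_value` above.
    fn visit_value(&mut self, v: &ToyValue) -> Result<(), String> {
        self.walk_value(v)
    }
    // Leaf hook, like `visit_primitive` above.
    fn visit_primitive(&mut self, _v: &ToyValue) -> Result<(), String> {
        Ok(())
    }
    // Default recursor, like `walk_value`: not meant to be overloaded.
    fn walk_value(&mut self, v: &ToyValue) -> Result<(), String> {
        match v {
            ToyValue::Primitive(_) => self.visit_primitive(v),
            ToyValue::Aggregate(fields) => {
                for field in fields {
                    self.visit_value(field)?;
                }
                Ok(())
            }
        }
    }
}

// An example visitor that only overrides the leaf hook.
struct CountPrimitives(usize);

impl ToyVisitor for CountPrimitives {
    fn visit_primitive(&mut self, _v: &ToyValue) -> Result<(), String> {
        self.0 += 1;
        Ok(())
    }
}

fn main() {
    let v = ToyValue::Aggregate(vec![
        ToyValue::Primitive(1),
        ToyValue::Aggregate(vec![ToyValue::Primitive(2)]),
    ]);
    let mut counter = CountPrimitives(0);
    counter.visit_value(&v).unwrap();
    assert_eq!(counter.0, 2);
}
```

Implementations only override the hooks they care about; the `walk_*` defaults supply the recursion, mirroring how `walk_value` above dispatches on variants, trait objects, scalars, primitives and fields.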
@@ -731,6 +727,7 @@ fn should_monomorphize_locally<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, instance: -> bool { let def_id = match instance.def { ty::InstanceDef::Item(def_id) => def_id, + ty::InstanceDef::VtableShim(..) | ty::InstanceDef::ClosureOnceShim { .. } | ty::InstanceDef::Virtual(..) | ty::InstanceDef::FnPtrShim(..) | @@ -739,27 +736,27 @@ fn should_monomorphize_locally<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, instance: ty::InstanceDef::CloneShim(..) => return true }; - return match tcx.hir.get_if_local(def_id) { - Some(Node::ForeignItem(..)) => { - false // foreign items are linked against, not codegened. - } - Some(_) => true, - None => { - if tcx.is_reachable_non_generic(def_id) || - tcx.is_foreign_item(def_id) || - is_available_upstream_generic(tcx, def_id, instance.substs) - { - // We can link to the item in question, no instance needed - // in this crate - false - } else { - if !tcx.is_mir_available(def_id) { - bug!("Cannot create local mono-item for {:?}", def_id) - } - true - } - } - }; + if tcx.is_foreign_item(def_id) { + // We can always link to foreign items + return false; + } + + if def_id.is_local() { + // local items cannot be referred to locally without monomorphizing them locally + return true; + } + + if tcx.is_reachable_non_generic(def_id) || + is_available_upstream_generic(tcx, def_id, instance.substs) { + // We can link to the item in question, no instance needed + // in this crate + return false; + } + + if !tcx.is_mir_available(def_id) { + bug!("Cannot create local mono-item for {:?}", def_id) + } + return true; fn is_available_upstream_generic<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId, @@ -903,17 +900,17 @@ fn create_mono_items_for_vtable_methods<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, trait_ty: Ty<'tcx>, impl_ty: Ty<'tcx>, output: &mut Vec>) { - assert!(!trait_ty.needs_subst() && !trait_ty.has_escaping_regions() && - !impl_ty.needs_subst() && !impl_ty.has_escaping_regions()); + assert!(!trait_ty.needs_subst() && !trait_ty.has_escaping_bound_vars() && + !impl_ty.needs_subst() && !impl_ty.has_escaping_bound_vars()); if let ty::Dynamic(ref trait_ty, ..) = trait_ty.sty { let poly_trait_ref = trait_ty.principal().with_self_ty(tcx, impl_ty); - assert!(!poly_trait_ref.has_escaping_regions()); + assert!(!poly_trait_ref.has_escaping_bound_vars()); // Walk all methods of the trait, including those of its supertraits let methods = tcx.vtable_methods(poly_trait_ref); let methods = methods.iter().cloned().filter_map(|method| method) - .map(|(def_id, substs)| ty::Instance::resolve( + .map(|(def_id, substs)| ty::Instance::resolve_for_vtable( tcx, ty::ParamEnv::reveal_all(), def_id, @@ -988,6 +985,20 @@ impl<'b, 'a, 'v> ItemLikeVisitor<'v> for RootCollector<'b, 'a, 'v> { hir::ItemKind::Const(..) => { // const items only generate mono items if they are // actually used somewhere. Just declaring them is insufficient. + + // but even just declaring them must collect the items they refer to + let def_id = self.tcx.hir.local_def_id(item.id); + + let instance = Instance::mono(self.tcx, def_id); + let cid = GlobalId { + instance, + promoted: None, + }; + let param_env = ty::ParamEnv::reveal_all(); + + if let Ok(val) = self.tcx.const_eval(param_env.and(cid)) { + collect_const(self.tcx, val, instance.substs, &mut self.output); + } } hir::ItemKind::Fn(..) => { let def_id = self.tcx.hir.local_def_id(item.id); @@ -1066,7 +1077,7 @@ impl<'b, 'a, 'v> RootCollector<'b, 'a, 'v> { // regions must appear in the argument // listing. 
let main_ret_ty = self.tcx.erase_regions( - &main_ret_ty.no_late_bound_regions().unwrap(), + &main_ret_ty.no_bound_vars().unwrap(), ); let start_instance = Instance::resolve( @@ -1198,15 +1209,10 @@ fn collect_neighbours<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, }; match tcx.const_eval(param_env.and(cid)) { Ok(val) => collect_const(tcx, val, instance.substs, output), - Err(err) => { - use rustc::mir::interpret::EvalErrorKind; - if let EvalErrorKind::ReferencedConstant(_) = err.error.kind { - err.report_as_error( - tcx.at(mir.promoted[i].span), - "erroneous constant used", - ); - } - }, + Err(ErrorHandled::Reported) => {}, + Err(ErrorHandled::TooGeneric) => span_bug!( + mir.promoted[i].span, "collection encountered polymorphic constant", + ), } } } @@ -1247,14 +1253,10 @@ fn collect_const<'a, 'tcx>( }; match tcx.const_eval(param_env.and(cid)) { Ok(val) => val.val, - Err(err) => { - let span = tcx.def_span(def_id); - err.report_as_error( - tcx.at(span), - "constant evaluation error", - ); - return; - } + Err(ErrorHandled::Reported) => return, + Err(ErrorHandled::TooGeneric) => span_bug!( + tcx.def_span(def_id), "collection encountered polymorphic constant", + ), } }, _ => constant.val, diff --git a/src/librustc_mir/monomorphize/item.rs b/src/librustc_mir/monomorphize/item.rs index 4c4d56c893..24de92e79f 100644 --- a/src/librustc_mir/monomorphize/item.rs +++ b/src/librustc_mir/monomorphize/item.rs @@ -8,12 +8,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -//! Walks the crate looking for items/impl-items/trait-items that have -//! either a `rustc_symbol_name` or `rustc_item_path` attribute and -//! generates an error giving, respectively, the symbol name or -//! item-path. This is used for unit testing the code that generates -//! paths etc in all kinds of annoying scenarios. - use monomorphize::Instance; use rustc::hir; use rustc::hir::def_id::DefId; @@ -382,7 +376,9 @@ impl<'a, 'tcx> DefPathBasedNames<'a, 'tcx> { self.push_type_params(substs, iter::empty(), output); } ty::Error | + ty::Bound(..) | ty::Infer(_) | + ty::Placeholder(..) | ty::UnnormalizedProjection(..) | ty::Projection(..) | ty::Param(_) | diff --git a/src/librustc_mir/monomorphize/partitioning.rs b/src/librustc_mir/monomorphize/partitioning.rs index 3a1108bb16..3a6ee6da42 100644 --- a/src/librustc_mir/monomorphize/partitioning.rs +++ b/src/librustc_mir/monomorphize/partitioning.rs @@ -102,21 +102,27 @@ //! source-level module, functions from the same module will be available for //! inlining, even when they are not marked #[inline]. 
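The module docs above describe grouping mono items by their source-level module and, when a fixed number of codegen units is requested, merging units down to that count. A minimal sketch of that idea follows; the `partition` helper and the string-based items are invented for illustration and do not reflect the compiler's actual merging heuristics:

```rust
use std::collections::BTreeMap;

// Toy partitioning: group items by module, then merge the smallest units
// until the requested unit count is reached.
fn partition(items: &[(&str, &str)], max_units: usize) -> Vec<Vec<String>> {
    // "Per-module" grouping: one unit per source-level module.
    let mut by_module: BTreeMap<&str, Vec<String>> = BTreeMap::new();
    for &(module, item) in items {
        by_module.entry(module).or_default().push(item.to_string());
    }
    let mut units: Vec<Vec<String>> = by_module.into_iter().map(|(_, v)| v).collect();
    // Stand-in for a fixed-unit-count strategy: fold small units together.
    while units.len() > max_units.max(1) {
        units.sort_by_key(|u| u.len());
        let smallest = units.remove(0);
        units[0].extend(smallest);
    }
    units
}

fn main() {
    let items = [("a", "a::f"), ("a", "a::g"), ("b", "b::h"), ("c", "c::i")];
    println!("{:?}", partition(&items, 2));
}
```

This mirrors the strategy choice in `collect_and_partition_mono_items` further down: `PartitioningStrategy::PerModule` under incremental compilation, `FixedUnitCount(tcx.sess.codegen_units())` otherwise.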
-use monomorphize::collector::InliningMap; +use std::collections::hash_map::Entry; +use std::cmp; +use std::sync::Arc; + +use syntax::ast::NodeId; +use syntax::symbol::InternedString; use rustc::dep_graph::{WorkProductId, WorkProduct, DepNode, DepConstructor}; use rustc::hir::CodegenFnAttrFlags; -use rustc::hir::def_id::{DefId, LOCAL_CRATE, CRATE_DEF_INDEX}; +use rustc::hir::def_id::{CrateNum, DefId, LOCAL_CRATE, CRATE_DEF_INDEX}; use rustc::hir::map::DefPathData; use rustc::mir::mono::{Linkage, Visibility, CodegenUnitNameBuilder}; use rustc::middle::exported_symbols::SymbolExportLevel; use rustc::ty::{self, TyCtxt, InstanceDef}; use rustc::ty::item_path::characteristic_def_id_of_type; -use rustc::util::nodemap::{FxHashMap, FxHashSet}; -use std::collections::hash_map::Entry; -use std::cmp; -use syntax::ast::NodeId; -use syntax::symbol::InternedString; +use rustc::ty::query::Providers; +use rustc::util::common::time; +use rustc::util::nodemap::{DefIdSet, FxHashMap, FxHashSet}; use rustc::mir::mono::MonoItem; + +use monomorphize::collector::InliningMap; +use monomorphize::collector::{self, MonoItemCollectionMode}; use monomorphize::item::{MonoItemExt, InstantiationMode}; pub use rustc::mir::mono::CodegenUnit; @@ -180,6 +186,7 @@ pub trait CodegenUnitExt<'tcx> { InstanceDef::Item(def_id) => { tcx.hir.as_local_node_id(def_id) } + InstanceDef::VtableShim(..) | InstanceDef::Intrinsic(..) | InstanceDef::FnPtrShim(..) | InstanceDef::Virtual(..) | @@ -422,6 +429,7 @@ fn mono_item_visibility( InstanceDef::Item(def_id) => def_id, // These are all compiler glue and such, never exported, always hidden. + InstanceDef::VtableShim(..) | InstanceDef::FnPtrShim(..) | InstanceDef::Virtual(..) | InstanceDef::Intrinsic(..) | @@ -503,7 +511,7 @@ fn mono_item_visibility( // // * First is weak lang items. These are basically mechanisms for // libcore to forward-reference symbols defined later in crates like - // the standard library or `#[panic_implementation]` definitions. The + // the standard library or `#[panic_handler]` definitions. The // definition of these weak lang items needs to be referenceable by // libcore, so we're no longer a candidate for internalization. // Removal of these functions can't be done by LLVM but rather must be @@ -756,6 +764,7 @@ fn characteristic_def_id_of_mono_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, MonoItem::Fn(instance) => { let def_id = match instance.def { ty::InstanceDef::Item(def_id) => def_id, + ty::InstanceDef::VtableShim(..) | ty::InstanceDef::FnPtrShim(..) | ty::InstanceDef::ClosureOnceShim { .. } | ty::InstanceDef::Intrinsic(..) | @@ -889,3 +898,146 @@ fn debug_dump<'a, 'b, 'tcx, I>(tcx: TyCtxt<'a, 'tcx, 'tcx>, } } } + +fn collect_and_partition_mono_items<'a, 'tcx>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + cnum: CrateNum, +) -> (Arc, Arc>>>) +{ + assert_eq!(cnum, LOCAL_CRATE); + + let collection_mode = match tcx.sess.opts.debugging_opts.print_mono_items { + Some(ref s) => { + let mode_string = s.to_lowercase(); + let mode_string = mode_string.trim(); + if mode_string == "eager" { + MonoItemCollectionMode::Eager + } else { + if mode_string != "lazy" { + let message = format!("Unknown codegen-item collection mode '{}'. 
\ + Falling back to 'lazy' mode.", + mode_string); + tcx.sess.warn(&message); + } + + MonoItemCollectionMode::Lazy + } + } + None => { + if tcx.sess.opts.cg.link_dead_code { + MonoItemCollectionMode::Eager + } else { + MonoItemCollectionMode::Lazy + } + } + }; + + let (items, inlining_map) = + time(tcx.sess, "monomorphization collection", || { + collector::collect_crate_mono_items(tcx, collection_mode) + }); + + tcx.sess.abort_if_errors(); + + ::monomorphize::assert_symbols_are_distinct(tcx, items.iter()); + + let strategy = if tcx.sess.opts.incremental.is_some() { + PartitioningStrategy::PerModule + } else { + PartitioningStrategy::FixedUnitCount(tcx.sess.codegen_units()) + }; + + let codegen_units = time(tcx.sess, "codegen unit partitioning", || { + partition( + tcx, + items.iter().cloned(), + strategy, + &inlining_map + ) + .into_iter() + .map(Arc::new) + .collect::>() + }); + + let mono_items: DefIdSet = items.iter().filter_map(|mono_item| { + match *mono_item { + MonoItem::Fn(ref instance) => Some(instance.def_id()), + MonoItem::Static(def_id) => Some(def_id), + _ => None, + } + }).collect(); + + if tcx.sess.opts.debugging_opts.print_mono_items.is_some() { + let mut item_to_cgus: FxHashMap<_, Vec<_>> = Default::default(); + + for cgu in &codegen_units { + for (&mono_item, &linkage) in cgu.items() { + item_to_cgus.entry(mono_item) + .or_default() + .push((cgu.name().clone(), linkage)); + } + } + + let mut item_keys: Vec<_> = items + .iter() + .map(|i| { + let mut output = i.to_string(tcx); + output.push_str(" @@"); + let mut empty = Vec::new(); + let cgus = item_to_cgus.get_mut(i).unwrap_or(&mut empty); + cgus.as_mut_slice().sort_by_cached_key(|&(ref name, _)| name.clone()); + cgus.dedup(); + for &(ref cgu_name, (linkage, _)) in cgus.iter() { + output.push_str(" "); + output.push_str(&cgu_name.as_str()); + + let linkage_abbrev = match linkage { + Linkage::External => "External", + Linkage::AvailableExternally => "Available", + Linkage::LinkOnceAny => "OnceAny", + Linkage::LinkOnceODR => "OnceODR", + Linkage::WeakAny => "WeakAny", + Linkage::WeakODR => "WeakODR", + Linkage::Appending => "Appending", + Linkage::Internal => "Internal", + Linkage::Private => "Private", + Linkage::ExternalWeak => "ExternalWeak", + Linkage::Common => "Common", + }; + + output.push_str("["); + output.push_str(linkage_abbrev); + output.push_str("]"); + } + output + }) + .collect(); + + item_keys.sort(); + + for item in item_keys { + println!("MONO_ITEM {}", item); + } + } + + (Arc::new(mono_items), Arc::new(codegen_units)) +} + +pub fn provide(providers: &mut Providers) { + providers.collect_and_partition_mono_items = + collect_and_partition_mono_items; + + providers.is_codegened_item = |tcx, def_id| { + let (all_mono_items, _) = + tcx.collect_and_partition_mono_items(LOCAL_CRATE); + all_mono_items.contains(&def_id) + }; + + providers.codegen_unit = |tcx, name| { + let (_, all) = tcx.collect_and_partition_mono_items(LOCAL_CRATE); + all.iter() + .find(|cgu| *cgu.name() == name) + .cloned() + .unwrap_or_else(|| panic!("failed to find cgu with name {:?}", name)) + }; +} diff --git a/src/librustc_mir/shim.rs b/src/librustc_mir/shim.rs index 6c32690cdb..04079319a7 100644 --- a/src/librustc_mir/shim.rs +++ b/src/librustc_mir/shim.rs @@ -13,6 +13,7 @@ use rustc::hir::def_id::DefId; use rustc::infer; use rustc::mir::*; use rustc::ty::{self, Ty, TyCtxt, GenericParamDefKind}; +use rustc::ty::layout::VariantIdx; use rustc::ty::subst::{Subst, Substs}; use rustc::ty::query::Providers; @@ -43,6 +44,15 @@ fn make_shim<'a, 
'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let mut result = match instance { ty::InstanceDef::Item(..) => bug!("item {:?} passed to make_shim", instance), + ty::InstanceDef::VtableShim(def_id) => { + build_call_shim( + tcx, + def_id, + Adjustment::DerefMove, + CallKind::Direct(def_id), + None, + ) + } ty::InstanceDef::FnPtrShim(def_id, ty) => { let trait_ = tcx.trait_of_item(def_id).unwrap(); let adjustment = match tcx.lang_items().fn_trait_kind(trait_) { @@ -128,6 +138,7 @@ fn make_shim<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, enum Adjustment { Identity, Deref, + DerefMove, RefMut, } @@ -186,7 +197,7 @@ fn build_drop_shim<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let source_info = SourceInfo { span, scope: OUTERMOST_SOURCE_SCOPE }; let return_block = BasicBlock::new(1); - let mut blocks = IndexVec::new(); + let mut blocks = IndexVec::with_capacity(2); let block = |blocks: &mut IndexVec<_, _>, kind| { blocks.push(BasicBlockData { statements: vec![], @@ -212,6 +223,20 @@ fn build_drop_shim<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ); if let Some(..) = ty { + // The first argument (index 0), but add 1 for the return value. + let dropee_ptr = Place::Local(Local::new(1+0)); + if tcx.sess.opts.debugging_opts.mir_emit_retag { + // Function arguments should be retagged + mir.basic_blocks_mut()[START_BLOCK].statements.insert(0, Statement { + source_info, + kind: StatementKind::Retag { fn_entry: true, place: dropee_ptr.clone() }, + }); + // We use raw ptr operations, better prepare the alias tracking for that + mir.basic_blocks_mut()[START_BLOCK].statements.insert(1, Statement { + source_info, + kind: StatementKind::EscapeToRaw(Operand::Copy(dropee_ptr.clone())), + }) + } let patch = { let param_env = tcx.param_env(def_id).with_reveal_all(); let mut elaborator = DropShimElaborator { @@ -220,7 +245,7 @@ fn build_drop_shim<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, tcx, param_env }; - let dropee = Place::Local(Local::new(1+0)).deref(); + let dropee = dropee_ptr.deref(); let resume_block = elaborator.patch.resume_block(); elaborate_drops::elaborate_drop( &mut elaborator, @@ -281,7 +306,7 @@ impl<'a, 'tcx> DropElaborator<'a, 'tcx> for DropShimElaborator<'a, 'tcx> { fn deref_subpath(&self, _path: Self::Path) -> Option { None } - fn downcast_subpath(&self, _path: Self::Path, _variant: usize) -> Option { + fn downcast_subpath(&self, _path: Self::Path, _variant: VariantIdx) -> Option { Some(()) } fn array_subpath(&self, _path: Self::Path, _index: u32, _size: u32) -> Option { @@ -701,6 +726,14 @@ fn build_call_shim<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let rcvr = match rcvr_adjustment { Adjustment::Identity => Operand::Move(rcvr_l), Adjustment::Deref => Operand::Copy(rcvr_l.deref()), + Adjustment::DerefMove => { + // fn(Self, ...) -> fn(*mut Self, ...) + let arg_ty = local_decls[rcvr_arg].ty; + assert!(arg_ty.is_self()); + local_decls[rcvr_arg].ty = tcx.mk_mut_ptr(arg_ty); + + Operand::Move(rcvr_l.deref()) + } Adjustment::RefMut => { // let rcvr = &mut rcvr; let ref_rcvr = local_decls.push(temp_decl( @@ -750,7 +783,8 @@ fn build_call_shim<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, })); } - let mut blocks = IndexVec::new(); + let n_blocks = if let Adjustment::RefMut = rcvr_adjustment { 5 } else { 2 }; + let mut blocks = IndexVec::with_capacity(n_blocks); let block = |blocks: &mut IndexVec<_, _>, statements, kind, is_cleanup| { blocks.push(BasicBlockData { statements, @@ -826,7 +860,9 @@ pub fn build_adt_ctor<'a, 'gcx, 'tcx>(infcx: &infer::InferCtxt<'a, 'gcx, 'tcx>, let param_env = gcx.param_env(def_id); // Normalize the sig. 
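The `Adjustment::DerefMove` case above rewrites a by-value `self` signature so the vtable shim receives `*mut Self` and moves the value out of it. The following standalone sketch shows that calling convention in ordinary Rust; the `deref_move_shim` helper is invented for illustration and is not the MIR shim the compiler builds:

```rust
// Sketch of "deref-move": adapt a function taking `T` by value so it can be
// called through a raw pointer to `T`.
unsafe fn deref_move_shim<T>(this: *mut T, f: fn(T)) {
    // Move the value out of the place the pointer refers to.
    let value = std::ptr::read(this);
    f(value);
}

fn consume(s: String) {
    println!("consumed: {}", s);
}

fn main() {
    let mut s = String::from("hello");
    let ptr = &mut s as *mut String;
    unsafe { deref_move_shim(ptr, consume) };
    // The shim moved the value out of `s`; prevent a second drop.
    std::mem::forget(s);
}
```

The real shim performs the move at the MIR level by dereferencing the `*mut Self` operand (`Operand::Move(rcvr_l.deref())` above) rather than calling `ptr::read`.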
- let sig = gcx.fn_sig(def_id).no_late_bound_regions().expect("LBR in ADT constructor signature"); + let sig = gcx.fn_sig(def_id) + .no_bound_vars() + .expect("LBR in ADT constructor signature"); let sig = gcx.normalize_erasing_regions(param_env, sig); let (adt_def, substs) = match sig.output().sty { @@ -846,7 +882,7 @@ pub fn build_adt_ctor<'a, 'gcx, 'tcx>(infcx: &infer::InferCtxt<'a, 'gcx, 'tcx>, let variant_no = if adt_def.is_enum() { adt_def.variant_index_with_id(def_id) } else { - 0 + VariantIdx::new(0) }; // return = ADT(arg0, arg1, ...); return diff --git a/src/librustc_mir/transform/add_retag.rs b/src/librustc_mir/transform/add_retag.rs new file mode 100644 index 0000000000..be7e34e2dc --- /dev/null +++ b/src/librustc_mir/transform/add_retag.rs @@ -0,0 +1,208 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! This pass adds retag (`Retag`) and escape-to-raw (`EscapeToRaw`) statements where appropriate. +//! It has to be run really early, before transformations like inlining, because +//! introducing these statements *adds* UB -- so, conceptually, this pass is actually part +//! of MIR building, and only after this pass we think of the program as having the +//! normal MIR semantics. + +use rustc::ty::{self, Ty, TyCtxt}; +use rustc::mir::*; +use transform::{MirPass, MirSource}; + +pub struct AddRetag; + +/// Determines whether this place is "stable": whether, if we evaluate it again +/// after the assignment, we can be sure to obtain the same place value. +/// (Concurrent accesses by other threads are no problem as these are anyway non-atomic +/// copies. Data races are UB.) +fn is_stable<'tcx>( + place: &Place<'tcx>, +) -> bool { + use rustc::mir::Place::*; + + match *place { + // Locals and statics have stable addresses, for sure + Local { .. } | + Promoted { .. } | + Static { .. } => + true, + // Recurse for projections + Projection(ref proj) => { + match proj.elem { + // Which place this evaluates to can change with any memory write, + // so cannot assume this to be stable. + ProjectionElem::Deref => + false, + // Array indices are interesting, but MIR building generates a *fresh* + // temporary for every array access, so the index cannot be changed as + // a side-effect. + ProjectionElem::Index { .. } | + // The rest is completely boring, they just offset by a constant. + ProjectionElem::Field { .. } | + ProjectionElem::ConstantIndex { .. } | + ProjectionElem::Subslice { .. } | + ProjectionElem::Downcast { .. } => + is_stable(&proj.base), + } + } + } +} + +/// Determine whether this type may have a reference in it, recursing below compound types but +/// not below references. +fn may_have_reference<'a, 'gcx, 'tcx>(ty: Ty<'tcx>, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> bool { + match ty.sty { + // Primitive types that are not references + ty::Bool | ty::Char | + ty::Float(_) | ty::Int(_) | ty::Uint(_) | + ty::RawPtr(..) | ty::FnPtr(..) | + ty::Str | ty::FnDef(..) | ty::Never => + false, + // References + ty::Ref(..) => true, + ty::Adt(..) if ty.is_box() => true, + // Compound types + ty::Array(ty, ..)
| ty::Slice(ty) => + may_have_reference(ty, tcx), + ty::Tuple(tys) => + tys.iter().any(|ty| may_have_reference(ty, tcx)), + ty::Adt(adt, substs) => + adt.variants.iter().any(|v| v.fields.iter().any(|f| + may_have_reference(f.ty(tcx, substs), tcx) + )), + // Conservative fallback + _ => true, + } +} + +impl MirPass for AddRetag { + fn run_pass<'a, 'tcx>(&self, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + _src: MirSource, + mir: &mut Mir<'tcx>) + { + if !tcx.sess.opts.debugging_opts.mir_emit_retag { + return; + } + let (span, arg_count) = (mir.span, mir.arg_count); + let (basic_blocks, local_decls) = mir.basic_blocks_and_local_decls_mut(); + let needs_retag = |place: &Place<'tcx>| { + // FIXME: Instead of giving up for unstable places, we should introduce + // a temporary and retag on that. + is_stable(place) && may_have_reference(place.ty(&*local_decls, tcx).to_ty(tcx), tcx) + }; + + // PART 1 + // Retag arguments at the beginning of the start block. + { + let source_info = SourceInfo { + scope: OUTERMOST_SOURCE_SCOPE, + span: span, // FIXME: Consider using just the span covering the function + // argument declaration. + }; + // Gather all arguments, skip return value. + let places = local_decls.iter_enumerated().skip(1).take(arg_count) + .map(|(local, _)| Place::Local(local)) + .filter(needs_retag) + .collect::>(); + // Emit their retags. + basic_blocks[START_BLOCK].statements.splice(0..0, + places.into_iter().map(|place| Statement { + source_info, + kind: StatementKind::Retag { fn_entry: true, place }, + }) + ); + } + + // PART 2 + // Retag return values of functions. Also escape-to-raw the argument of `drop`. + // We collect the return destinations because we cannot mutate while iterating. + let mut returns: Vec<(SourceInfo, Place<'tcx>, BasicBlock)> = Vec::new(); + for block_data in basic_blocks.iter_mut() { + match block_data.terminator().kind { + TerminatorKind::Call { ref destination, .. } => { + // Remember the return destination for later + if let Some(ref destination) = destination { + if needs_retag(&destination.0) { + returns.push(( + block_data.terminator().source_info, + destination.0.clone(), + destination.1, + )); + } + } + } + TerminatorKind::Drop { .. } | + TerminatorKind::DropAndReplace { .. } => { + // `Drop` is also a call, but it doesn't return anything so we are good. + } + _ => { + // Not a block ending in a Call -> ignore. + } + } + } + // Now we go over the returns we collected to retag the return values. + for (source_info, dest_place, dest_block) in returns { + basic_blocks[dest_block].statements.insert(0, Statement { + source_info, + kind: StatementKind::Retag { fn_entry: false, place: dest_place }, + }); + } + + // PART 3 + // Add retag after assignment. + for block_data in basic_blocks { + // We want to insert statements as we iterate. To this end, we + // iterate backwards using indices. + for i in (0..block_data.statements.len()).rev() { + match block_data.statements[i].kind { + // If we are casting *from* a reference, we may have to escape-to-raw. + StatementKind::Assign(_, box Rvalue::Cast( + CastKind::Misc, + ref src, + dest_ty, + )) => { + let src_ty = src.ty(&*local_decls, tcx); + if src_ty.is_region_ptr() { + // The only `Misc` casts on references are those creating raw pointers. + assert!(dest_ty.is_unsafe_ptr()); + // Insert escape-to-raw before the cast. We are not concerned + // with stability here: Our EscapeToRaw will not change the value + // that the cast will then use. 
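Putting the three parts of `AddRetag` together, here is a hypothetical piece of user code annotated with where the pass would emit statements when `-Z mir-emit-retag` is enabled; the functions are invented for illustration and the comments paraphrase the rules above:

```rust
fn make_ref(x: &mut i32) -> &mut i32 { x }

fn example(r: &mut i32) -> *mut i32 {
    // PART 1: `r` is a reference-typed argument, so it is retagged at function entry.
    let alias = make_ref(r);     // PART 2: the call's return destination is retagged.
    let reborrow = &mut *alias;  // PART 3: a Retag is emitted after this assignment.
    reborrow as *mut i32         // PART 3: an EscapeToRaw is emitted before this
                                 // reference-to-raw-pointer cast; the raw-pointer
                                 // destination itself needs no retag.
}

fn main() {
    let mut x = 0;
    let _p = example(&mut x);
}
```

Unstable places (for example, anything behind a `Deref` projection) are skipped by `needs_retag`, per the FIXME above.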
+ // `src` might be a "move", but we rely on this not actually moving + // but just doing a memcpy. It is crucial that we do EscapeToRaw + // on the src because we need it with its original type. + let source_info = block_data.statements[i].source_info; + block_data.statements.insert(i, Statement { + source_info, + kind: StatementKind::EscapeToRaw(src.clone()), + }); + } + } + // Assignments of reference or ptr type are the ones where we may have + // to update tags. This includes `x = &[mut] ...` and hence + // we also retag after taking a reference! + StatementKind::Assign(ref place, _) if needs_retag(place) => { + // Insert a retag after the assignment. + let source_info = block_data.statements[i].source_info; + block_data.statements.insert(i+1, Statement { + source_info, + kind: StatementKind::Retag { fn_entry: false, place: place.clone() }, + }); + } + // Do nothing for the rest + _ => {}, + }; + } + } + } +} diff --git a/src/librustc_mir/transform/add_validation.rs b/src/librustc_mir/transform/add_validation.rs deleted file mode 100644 index 5b489b5db9..0000000000 --- a/src/librustc_mir/transform/add_validation.rs +++ /dev/null @@ -1,395 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! This pass adds validation calls (AcquireValid, ReleaseValid) where appropriate. -//! It has to be run really early, before transformations like inlining, because -//! introducing these calls *adds* UB -- so, conceptually, this pass is actually part -//! of MIR building, and only after this pass we think of the program has having the -//! normal MIR semantics. - -use rustc::ty::{self, TyCtxt, RegionKind}; -use rustc::hir; -use rustc::mir::*; -use rustc::middle::region; -use transform::{MirPass, MirSource}; - -pub struct AddValidation; - -/// Determine the "context" of the place: Mutability and region. -fn place_context<'a, 'tcx, D>( - place: &Place<'tcx>, - local_decls: &D, - tcx: TyCtxt<'a, 'tcx, 'tcx> -) -> (Option, hir::Mutability) - where D: HasLocalDecls<'tcx> -{ - use rustc::mir::Place::*; - - match *place { - Local { .. } => (None, hir::MutMutable), - Promoted(_) | - Static(_) => (None, hir::MutImmutable), - Projection(ref proj) => { - match proj.elem { - ProjectionElem::Deref => { - // Computing the inside the recursion makes this quadratic. - // We don't expect deep paths though. - let ty = proj.base.ty(local_decls, tcx).to_ty(tcx); - // A Deref projection may restrict the context, this depends on the type - // being deref'd. - let context = match ty.sty { - ty::Ref(re, _, mutbl) => { - let re = match re { - &RegionKind::ReScope(ce) => Some(ce), - &RegionKind::ReErased => - bug!("AddValidation pass must be run before erasing lifetimes"), - _ => None - }; - (re, mutbl) - } - ty::RawPtr(_) => - // There is no guarantee behind even a mutable raw pointer, - // no write locks are acquired there, so we also don't want to - // release any. - (None, hir::MutImmutable), - ty::Adt(adt, _) if adt.is_box() => (None, hir::MutMutable), - _ => bug!("Deref on a non-pointer type {:?}", ty), - }; - // "Intersect" this restriction with proj.base. 
- if let (Some(_), hir::MutImmutable) = context { - // This is already as restricted as it gets, no need to even recurse - context - } else { - let base_context = place_context(&proj.base, local_decls, tcx); - // The region of the outermost Deref is always most restrictive. - let re = context.0.or(base_context.0); - let mutbl = context.1.and(base_context.1); - (re, mutbl) - } - - } - _ => place_context(&proj.base, local_decls, tcx), - } - } - } -} - -/// Check if this function contains an unsafe block or is an unsafe function. -fn fn_contains_unsafe<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, src: MirSource) -> bool { - use rustc::hir::intravisit::{self, Visitor, FnKind}; - use rustc::hir::map::blocks::FnLikeNode; - use rustc::hir::Node; - - /// Decide if this is an unsafe block - fn block_is_unsafe(block: &hir::Block) -> bool { - use rustc::hir::BlockCheckMode::*; - - match block.rules { - UnsafeBlock(_) | PushUnsafeBlock(_) => true, - // For PopUnsafeBlock, we don't actually know -- but we will always also check all - // parent blocks, so we can safely declare the PopUnsafeBlock to not be unsafe. - DefaultBlock | PopUnsafeBlock(_) => false, - } - } - - /// Decide if this FnLike is a closure - fn fn_is_closure<'a>(fn_like: FnLikeNode<'a>) -> bool { - match fn_like.kind() { - FnKind::Closure(_) => true, - FnKind::Method(..) | FnKind::ItemFn(..) => false, - } - } - - let node_id = tcx.hir.as_local_node_id(src.def_id).unwrap(); - let fn_like = match tcx.hir.body_owner_kind(node_id) { - hir::BodyOwnerKind::Fn => { - match FnLikeNode::from_node(tcx.hir.get(node_id)) { - Some(fn_like) => fn_like, - None => return false, // e.g. struct ctor shims -- such auto-generated code cannot - // contain unsafe. - } - }, - _ => return false, // only functions can have unsafe - }; - - // Test if the function is marked unsafe. - if fn_like.unsafety() == hir::Unsafety::Unsafe { - return true; - } - - // For closures, we need to walk up the parents and see if we are inside an unsafe fn or - // unsafe block. - if fn_is_closure(fn_like) { - let mut cur = fn_like.id(); - loop { - // Go further upwards. - cur = tcx.hir.get_parent_node(cur); - let node = tcx.hir.get(cur); - // Check if this is an unsafe function - if let Some(fn_like) = FnLikeNode::from_node(node) { - if !fn_is_closure(fn_like) { - if fn_like.unsafety() == hir::Unsafety::Unsafe { - return true; - } - } - } - // Check if this is an unsafe block, or an item - match node { - Node::Expr(&hir::Expr { node: hir::ExprKind::Block(ref block, _), ..}) => { - if block_is_unsafe(&*block) { - // Found an unsafe block, we can bail out here. - return true; - } - } - Node::Item(..) => { - // No walking up beyond items. This makes sure the loop always terminates. - break; - } - _ => {}, - } - } - } - - // Visit the entire body of the function and check for unsafe blocks in there - struct FindUnsafe { - found_unsafe: bool, - } - let mut finder = FindUnsafe { found_unsafe: false }; - // Run the visitor on the NodeId we got. Seems like there is no uniform way to do that. - finder.visit_body(tcx.hir.body(fn_like.body())); - - impl<'tcx> Visitor<'tcx> for FindUnsafe { - fn nested_visit_map<'this>(&'this mut self) -> intravisit::NestedVisitorMap<'this, 'tcx> { - intravisit::NestedVisitorMap::None - } - - fn visit_block(&mut self, b: &'tcx hir::Block) { - if self.found_unsafe { return; } // short-circuit - - if block_is_unsafe(b) { - // We found an unsafe block. We can stop searching. - self.found_unsafe = true; - } else { - // No unsafe block here, go on searching. 
- intravisit::walk_block(self, b); - } - } - } - - finder.found_unsafe -} - -impl MirPass for AddValidation { - fn run_pass<'a, 'tcx>(&self, - tcx: TyCtxt<'a, 'tcx, 'tcx>, - src: MirSource, - mir: &mut Mir<'tcx>) - { - let emit_validate = tcx.sess.opts.debugging_opts.mir_emit_validate; - if emit_validate == 0 { - return; - } - let restricted_validation = emit_validate == 1 && fn_contains_unsafe(tcx, src); - let (span, arg_count) = (mir.span, mir.arg_count); - let (basic_blocks, local_decls) = mir.basic_blocks_and_local_decls_mut(); - - // Convert a place to a validation operand. - let place_to_operand = |place: Place<'tcx>| -> ValidationOperand<'tcx, Place<'tcx>> { - let (re, mutbl) = place_context(&place, local_decls, tcx); - let ty = place.ty(local_decls, tcx).to_ty(tcx); - ValidationOperand { place, ty, re, mutbl } - }; - - // Emit an Acquire at the beginning of the given block. If we are in restricted emission - // mode (mir_emit_validate=1), also emit a Release immediately after the Acquire. - let emit_acquire = |block: &mut BasicBlockData<'tcx>, source_info, operands: Vec<_>| { - if operands.len() == 0 { - return; // Nothing to do - } - // Emit the release first, to avoid cloning if we do not emit it - if restricted_validation { - let release_stmt = Statement { - source_info, - kind: StatementKind::Validate(ValidationOp::Release, operands.clone()), - }; - block.statements.insert(0, release_stmt); - } - // Now, the acquire - let acquire_stmt = Statement { - source_info, - kind: StatementKind::Validate(ValidationOp::Acquire, operands), - }; - block.statements.insert(0, acquire_stmt); - }; - - // PART 1 - // Add an AcquireValid at the beginning of the start block. - { - let source_info = SourceInfo { - scope: OUTERMOST_SOURCE_SCOPE, - span: span, // FIXME: Consider using just the span covering the function - // argument declaration. - }; - // Gather all arguments, skip return value. - let operands = local_decls.iter_enumerated().skip(1).take(arg_count) - .map(|(local, _)| place_to_operand(Place::Local(local))).collect(); - emit_acquire(&mut basic_blocks[START_BLOCK], source_info, operands); - } - - // PART 2 - // Add ReleaseValid/AcquireValid around function call terminators. We don't use a visitor - // because we need to access the block that a Call jumps to. - let mut returns : Vec<(SourceInfo, Place<'tcx>, BasicBlock)> = Vec::new(); - for block_data in basic_blocks.iter_mut() { - match block_data.terminator { - Some(Terminator { kind: TerminatorKind::Call { ref args, ref destination, .. }, - source_info }) => { - // Before the call: Release all arguments *and* the return value. - // The callee may write into the return value! Note that this relies - // on "release of uninitialized" to be a NOP. - if !restricted_validation { - let release_stmt = Statement { - source_info, - kind: StatementKind::Validate(ValidationOp::Release, - destination.iter().map(|dest| place_to_operand(dest.0.clone())) - .chain( - args.iter().filter_map(|op| { - match op { - &Operand::Copy(ref place) | - &Operand::Move(ref place) => - Some(place_to_operand(place.clone())), - &Operand::Constant(..) => { None }, - } - }) - ).collect()) - }; - block_data.statements.push(release_stmt); - } - // Remember the return destination for later - if let &Some(ref destination) = destination { - returns.push((source_info, destination.0.clone(), destination.1)); - } - } - Some(Terminator { kind: TerminatorKind::Drop { location: ref place, .. 
}, - source_info }) | - Some(Terminator { kind: TerminatorKind::DropAndReplace { location: ref place, .. }, - source_info }) => { - // Before the call: Release all arguments - if !restricted_validation { - let release_stmt = Statement { - source_info, - kind: StatementKind::Validate(ValidationOp::Release, - vec![place_to_operand(place.clone())]), - }; - block_data.statements.push(release_stmt); - } - // drop doesn't return anything, so we need no acquire. - } - _ => { - // Not a block ending in a Call -> ignore. - } - } - } - // Now we go over the returns we collected to acquire the return values. - for (source_info, dest_place, dest_block) in returns { - emit_acquire( - &mut basic_blocks[dest_block], - source_info, - vec![place_to_operand(dest_place)] - ); - } - - if restricted_validation { - // No part 3 for us. - return; - } - - // PART 3 - // Add ReleaseValid/AcquireValid around Ref and Cast. Again an iterator does not seem very - // suited as we need to add new statements before and after each Ref. - for block_data in basic_blocks { - // We want to insert statements around Ref commands as we iterate. To this end, we - // iterate backwards using indices. - for i in (0..block_data.statements.len()).rev() { - match block_data.statements[i].kind { - // When the borrow of this ref expires, we need to recover validation. - StatementKind::Assign(_, box Rvalue::Ref(_, _, _)) => { - // Due to a lack of NLL; we can't capture anything directly here. - // Instead, we have to re-match and clone there. - let (dest_place, re, src_place) = match block_data.statements[i].kind { - StatementKind::Assign(ref dest_place, - box Rvalue::Ref(re, _, ref src_place)) => { - (dest_place.clone(), re, src_place.clone()) - }, - _ => bug!("We already matched this."), - }; - // So this is a ref, and we got all the data we wanted. - // Do an acquire of the result -- but only what it points to, so add a Deref - // projection. - let acquire_stmt = Statement { - source_info: block_data.statements[i].source_info, - kind: StatementKind::Validate(ValidationOp::Acquire, - vec![place_to_operand(dest_place.deref())]), - }; - block_data.statements.insert(i+1, acquire_stmt); - - // The source is released until the region of the borrow ends. - let op = match re { - &RegionKind::ReScope(ce) => ValidationOp::Suspend(ce), - &RegionKind::ReErased => - bug!("AddValidation pass must be run before erasing lifetimes"), - _ => ValidationOp::Release, - }; - let release_stmt = Statement { - source_info: block_data.statements[i].source_info, - kind: StatementKind::Validate(op, vec![place_to_operand(src_place)]), - }; - block_data.statements.insert(i, release_stmt); - } - // Casts can change what validation does (e.g. unsizing) - StatementKind::Assign(_, box Rvalue::Cast(kind, Operand::Copy(_), _)) | - StatementKind::Assign(_, box Rvalue::Cast(kind, Operand::Move(_), _)) - if kind != CastKind::Misc => - { - // Due to a lack of NLL; we can't capture anything directly here. - // Instead, we have to re-match and clone there. 
- let (dest_place, src_place) = match block_data.statements[i].kind { - StatementKind::Assign(ref dest_place, - box Rvalue::Cast(_, Operand::Copy(ref src_place), _)) | - StatementKind::Assign(ref dest_place, - box Rvalue::Cast(_, Operand::Move(ref src_place), _)) => - { - (dest_place.clone(), src_place.clone()) - }, - _ => bug!("We already matched this."), - }; - - // Acquire of the result - let acquire_stmt = Statement { - source_info: block_data.statements[i].source_info, - kind: StatementKind::Validate(ValidationOp::Acquire, - vec![place_to_operand(dest_place)]), - }; - block_data.statements.insert(i+1, acquire_stmt); - - // Release of the input - let release_stmt = Statement { - source_info: block_data.statements[i].source_info, - kind: StatementKind::Validate(ValidationOp::Release, - vec![place_to_operand(src_place)]), - }; - block_data.statements.insert(i, release_stmt); - } - _ => {}, - } - } - } - } -} diff --git a/src/librustc_mir/transform/check_unsafety.rs b/src/librustc_mir/transform/check_unsafety.rs index edd15c39fe..3404772f82 100644 --- a/src/librustc_mir/transform/check_unsafety.rs +++ b/src/librustc_mir/transform/check_unsafety.rs @@ -112,8 +112,8 @@ impl<'a, 'tcx> Visitor<'tcx> for UnsafetyChecker<'a, 'tcx> { StatementKind::SetDiscriminant { .. } | StatementKind::StorageLive(..) | StatementKind::StorageDead(..) | - StatementKind::EndRegion(..) | - StatementKind::Validate(..) | + StatementKind::Retag { .. } | + StatementKind::EscapeToRaw { .. } | StatementKind::AscribeUserType(..) | StatementKind::Nop => { // safe (at least as emitted during MIR construction) diff --git a/src/librustc_mir/transform/cleanup_post_borrowck.rs b/src/librustc_mir/transform/cleanup_post_borrowck.rs index 4d3b422ab2..c0edd3926d 100644 --- a/src/librustc_mir/transform/cleanup_post_borrowck.rs +++ b/src/librustc_mir/transform/cleanup_post_borrowck.rs @@ -10,106 +10,26 @@ //! This module provides two passes: //! -//! - `CleanEndRegions`, that reduces the set of `EndRegion` statements -//! in the MIR. -//! - `CleanAscribeUserType`, that replaces all `AscribeUserType` statements -//! with `Nop`. +//! - [CleanAscribeUserType], that replaces all +//! [StatementKind::AscribeUserType] statements with [StatementKind::Nop]. +//! - [CleanFakeReadsAndBorrows], that replaces all [FakeRead] statements and +//! borrows that are read by [FakeReadCause::ForMatchGuard] fake reads with +//! [StatementKind::Nop]. //! -//! The `CleanEndRegions` "pass" is actually implemented as two +//! The [CleanFakeReadsAndBorrows] "pass" is actually implemented as two //! traversals (aka visits) of the input MIR. The first traversal, -//! `GatherBorrowedRegions`, finds all of the regions in the MIR -//! that are involved in a borrow. -//! -//! The second traversal, `DeleteTrivialEndRegions`, walks over the -//! MIR and removes any `EndRegion` that is applied to a region that -//! was not seen in the previous pass. -//! -//! The `CleanAscribeUserType` pass runs at a distinct time from the -//! `CleanEndRegions` pass. It is important that the `CleanAscribeUserType` -//! pass runs after the MIR borrowck so that the NLL type checker can -//! perform the type assertion when it encounters the `AscribeUserType` -//! statements. +//! [DeleteAndRecordFakeReads], deletes the fake reads and finds the temporaries +//! read by [ForMatchGuard] reads, and [DeleteFakeBorrows] deletes the +//! initialization of those temporaries. 
use rustc_data_structures::fx::FxHashSet; -use rustc::middle::region; use rustc::mir::{BasicBlock, FakeReadCause, Local, Location, Mir, Place}; -use rustc::mir::{Rvalue, Statement, StatementKind}; -use rustc::mir::visit::{MutVisitor, Visitor, TyContext}; -use rustc::ty::{Ty, RegionKind, TyCtxt}; +use rustc::mir::{Statement, StatementKind}; +use rustc::mir::visit::MutVisitor; +use rustc::ty::TyCtxt; use transform::{MirPass, MirSource}; -pub struct CleanEndRegions; - -#[derive(Default)] -struct GatherBorrowedRegions { - seen_regions: FxHashSet, -} - -struct DeleteTrivialEndRegions<'a> { - seen_regions: &'a FxHashSet, -} - -impl MirPass for CleanEndRegions { - fn run_pass<'a, 'tcx>(&self, - tcx: TyCtxt<'a, 'tcx, 'tcx>, - _source: MirSource, - mir: &mut Mir<'tcx>) { - if !tcx.emit_end_regions() { return; } - - let mut gather = GatherBorrowedRegions::default(); - gather.visit_mir(mir); - - let mut delete = DeleteTrivialEndRegions { seen_regions: &mut gather.seen_regions }; - delete.visit_mir(mir); - } -} - -impl<'tcx> Visitor<'tcx> for GatherBorrowedRegions { - fn visit_rvalue(&mut self, - rvalue: &Rvalue<'tcx>, - location: Location) { - // Gather regions that are used for borrows - if let Rvalue::Ref(r, _, _) = *rvalue { - if let RegionKind::ReScope(ce) = *r { - self.seen_regions.insert(ce); - } - } - self.super_rvalue(rvalue, location); - } - - fn visit_ty(&mut self, ty: &Ty<'tcx>, _: TyContext) { - // Gather regions that occur in types - for re in ty.walk().flat_map(|t| t.regions()) { - match *re { - RegionKind::ReScope(ce) => { self.seen_regions.insert(ce); } - _ => {}, - } - } - self.super_ty(ty); - } -} - -impl<'a, 'tcx> MutVisitor<'tcx> for DeleteTrivialEndRegions<'a> { - fn visit_statement(&mut self, - block: BasicBlock, - statement: &mut Statement<'tcx>, - location: Location) { - let mut delete_it = false; - - if let StatementKind::EndRegion(ref region_scope) = statement.kind { - if !self.seen_regions.contains(region_scope) { - delete_it = true; - } - } - - if delete_it { - statement.make_nop(); - } - self.super_statement(block, statement, location); - } -} - pub struct CleanAscribeUserType; pub struct DeleteAscribeUserType; diff --git a/src/librustc_mir/transform/const_prop.rs b/src/librustc_mir/transform/const_prop.rs index 51644a6bba..f21efaa048 100644 --- a/src/librustc_mir/transform/const_prop.rs +++ b/src/librustc_mir/transform/const_prop.rs @@ -17,13 +17,8 @@ use rustc::mir::{Constant, Location, Place, Mir, Operand, Rvalue, Local}; use rustc::mir::{NullOp, UnOp, StatementKind, Statement, BasicBlock, LocalKind}; use rustc::mir::{TerminatorKind, ClearCrossCrate, SourceInfo, BinOp, ProjectionElem}; use rustc::mir::visit::{Visitor, PlaceContext, MutatingUseContext, NonMutatingUseContext}; -use rustc::mir::interpret::{ - ConstEvalErr, EvalErrorKind, Scalar, GlobalId, EvalResult -}; +use rustc::mir::interpret::{EvalErrorKind, Scalar, GlobalId, EvalResult}; use rustc::ty::{TyCtxt, self, Instance}; -use interpret::{self, EvalContext, Value, OpTy, MemoryKind, ScalarMaybeUndef}; -use const_eval::{CompileTimeInterpreter, eval_promoted, mk_borrowck_eval_cx}; -use transform::{MirPass, MirSource}; use syntax::source_map::{Span, DUMMY_SP}; use rustc::ty::subst::Substs; use rustc_data_structures::indexed_vec::IndexVec; @@ -33,6 +28,12 @@ use rustc::ty::layout::{ HasTyCtxt, TargetDataLayout, HasDataLayout, }; +use interpret::{self, EvalContext, ScalarMaybeUndef, Immediate, OpTy, MemoryKind}; +use const_eval::{ + CompileTimeInterpreter, const_to_op, error_to_const_error, eval_promoted, 
mk_borrowck_eval_cx +}; +use transform::{MirPass, MirSource}; + pub struct ConstProp; impl MirPass for ConstProp { @@ -44,14 +45,24 @@ impl MirPass for ConstProp { if source.promoted.is_some() { return; } - match tcx.describe_def(source.def_id) { - // skip statics/consts because they'll be evaluated by miri anyway - Some(Def::Const(..)) | - Some(Def::Static(..)) => return, - // we still run on associated constants, because they might not get evaluated - // within the current crate - _ => {}, + + use rustc::hir::map::blocks::FnLikeNode; + let node_id = tcx.hir.as_local_node_id(source.def_id) + .expect("Non-local call to local provider is_const_fn"); + + let is_fn_like = FnLikeNode::from_node(tcx.hir.get(node_id)).is_some(); + let is_assoc_const = match tcx.describe_def(source.def_id) { + Some(Def::AssociatedConst(_)) => true, + _ => false, + }; + + // Only run const prop on functions, methods, closures and associated constants + if !is_fn_like && !is_assoc_const { + // skip anon_const/statics/consts because they'll be evaluated by miri anyway + trace!("ConstProp skipped for {:?}", source.def_id); + return } + trace!("ConstProp starting for {:?}", source.def_id); // FIXME(oli-obk, eddyb) Optimize locals (or even local paths) to hold @@ -78,23 +89,23 @@ struct ConstPropagator<'a, 'mir, 'tcx:'a+'mir> { param_env: ParamEnv<'tcx>, } -impl<'a, 'b, 'tcx> LayoutOf for &'a ConstPropagator<'a, 'b, 'tcx> { +impl<'a, 'b, 'tcx> LayoutOf for ConstPropagator<'a, 'b, 'tcx> { type Ty = ty::Ty<'tcx>; type TyLayout = Result, LayoutError<'tcx>>; - fn layout_of(self, ty: ty::Ty<'tcx>) -> Self::TyLayout { + fn layout_of(&self, ty: ty::Ty<'tcx>) -> Self::TyLayout { self.tcx.layout_of(self.param_env.and(ty)) } } -impl<'a, 'b, 'tcx> HasDataLayout for &'a ConstPropagator<'a, 'b, 'tcx> { +impl<'a, 'b, 'tcx> HasDataLayout for ConstPropagator<'a, 'b, 'tcx> { #[inline] fn data_layout(&self) -> &TargetDataLayout { &self.tcx.data_layout } } -impl<'a, 'b, 'tcx> HasTyCtxt<'tcx> for &'a ConstPropagator<'a, 'b, 'tcx> { +impl<'a, 'b, 'tcx> HasTyCtxt<'tcx> for ConstPropagator<'a, 'b, 'tcx> { #[inline] fn tcx<'c>(&'c self) -> TyCtxt<'c, 'tcx, 'tcx> { self.tcx @@ -144,10 +155,9 @@ impl<'a, 'mir, 'tcx> ConstPropagator<'a, 'mir, 'tcx> { let r = match f(self) { Ok(val) => Some(val), Err(error) => { - let (stacktrace, span) = self.ecx.generate_stacktrace(None); - let diagnostic = ConstEvalErr { span, error, stacktrace }; + let diagnostic = error_to_const_error(&self.ecx, error); use rustc::mir::interpret::EvalErrorKind::*; - match diagnostic.error.kind { + match diagnostic.error { // don't report these, they make no sense in a const prop context | MachineError(_) // at runtime these transformations might make sense @@ -175,11 +185,7 @@ impl<'a, 'mir, 'tcx> ConstPropagator<'a, 'mir, 'tcx> { | InvalidDiscriminant(..) | PointerOutOfBounds { .. } | InvalidNullPointerUsage - | MemoryLockViolation { .. } - | MemoryAcquireConflict { .. } | ValidationFailure(..) - | InvalidMemoryLockRelease { .. } - | DeallocatedLockedMemory { .. } | InvalidPointerMath | ReadUndefBytes(_) | DeadLocal @@ -193,6 +199,7 @@ impl<'a, 'mir, 'tcx> ConstPropagator<'a, 'mir, 'tcx> { | CalledClosureAsFunction | VtableForArgumentlessMethod | ModifiedConstantMemory + | ModifiedStatic | AssumptionNotHeld // FIXME: should probably be removed and turned into a bug! 
call | TypeNotPrimitive(_) @@ -208,7 +215,7 @@ impl<'a, 'mir, 'tcx> ConstPropagator<'a, 'mir, 'tcx> { | ReadFromReturnPointer | GeneratorResumedAfterReturn | GeneratorResumedAfterPanic - | ReferencedConstant(_) + | ReferencedConstant | InfiniteLoop => { // FIXME: report UB here @@ -223,7 +230,6 @@ impl<'a, 'mir, 'tcx> ConstPropagator<'a, 'mir, 'tcx> { | UnimplementedTraitSelection | TypeckError | TooGeneric - | CheckMatchError // these are just noise => {}, @@ -259,21 +265,13 @@ impl<'a, 'mir, 'tcx> ConstPropagator<'a, 'mir, 'tcx> { source_info: SourceInfo, ) -> Option> { self.ecx.tcx.span = source_info.span; - match self.ecx.const_to_op(c.literal) { + match const_to_op(&self.ecx, c.literal) { Ok(op) => { Some((op, c.span)) }, Err(error) => { - let (stacktrace, span) = self.ecx.generate_stacktrace(None); - let err = ConstEvalErr { - span, - error, - stacktrace, - }; - err.report_as_error( - self.tcx.at(source_info.span), - "could not evaluate constant", - ); + let err = error_to_const_error(&self.ecx, error); + err.report_as_error(self.ecx.tcx, "erroneous constant used"); None }, } @@ -314,7 +312,7 @@ impl<'a, 'mir, 'tcx> ConstPropagator<'a, 'mir, 'tcx> { eval_promoted(this.tcx, cid, this.mir, this.param_env) })?; trace!("evaluated promoted {:?} to {:?}", promoted, res); - Some((res, source_info.span)) + Some((res.into(), source_info.span)) }, _ => None, } @@ -359,7 +357,7 @@ impl<'a, 'mir, 'tcx> ConstPropagator<'a, 'mir, 'tcx> { Rvalue::NullaryOp(NullOp::SizeOf, ty) => { type_size_of(self.tcx, self.param_env, ty).and_then(|n| Some(( OpTy { - op: interpret::Operand::Immediate(Value::Scalar( + op: interpret::Operand::Immediate(Immediate::Scalar( Scalar::Bits { bits: n as u128, size: self.tcx.data_layout.pointer_size.bytes() as u8, @@ -402,7 +400,7 @@ impl<'a, 'mir, 'tcx> ConstPropagator<'a, 'mir, 'tcx> { this.ecx.unary_op(op, prim, arg.layout) })?; let res = OpTy { - op: interpret::Operand::Immediate(Value::Scalar(val.into())), + op: interpret::Operand::Immediate(Immediate::Scalar(val.into())), layout: place_layout, }; Some((res, span)) @@ -423,7 +421,7 @@ impl<'a, 'mir, 'tcx> ConstPropagator<'a, 'mir, 'tcx> { } let r = self.use_ecx(source_info, |this| { - this.ecx.read_value(right.0) + this.ecx.read_immediate(right.0) })?; if op == BinOp::Shr || op == BinOp::Shl { let left_ty = left.ty(self.mir, self.tcx); @@ -456,14 +454,14 @@ impl<'a, 'mir, 'tcx> ConstPropagator<'a, 'mir, 'tcx> { } let left = self.eval_operand(left, source_info)?; let l = self.use_ecx(source_info, |this| { - this.ecx.read_value(left.0) + this.ecx.read_immediate(left.0) })?; trace!("const evaluating {:?} for {:?} and {:?}", op, left, right); let (val, overflow) = self.use_ecx(source_info, |this| { - this.ecx.binary_op_val(op, l, r) + this.ecx.binary_op_imm(op, l, r) })?; let val = if let Rvalue::CheckedBinaryOp(..) = *rvalue { - Value::ScalarPair( + Immediate::ScalarPair( val.into(), Scalar::from_bool(overflow).into(), ) @@ -473,7 +471,7 @@ impl<'a, 'mir, 'tcx> ConstPropagator<'a, 'mir, 'tcx> { let _: Option<()> = self.use_ecx(source_info, |_| Err(err)); return None; } - Value::Scalar(val.into()) + Immediate::Scalar(val.into()) }; let res = OpTy { op: interpret::Operand::Immediate(val), @@ -596,8 +594,8 @@ impl<'b, 'a, 'tcx> Visitor<'tcx> for ConstPropagator<'b, 'a, 'tcx> { if let TerminatorKind::Assert { expected, msg, cond, .. 
} = kind { if let Some(value) = self.eval_operand(cond, source_info) { trace!("assertion on {:?} should be {:?}", value, expected); - let expected = Value::Scalar(Scalar::from_bool(*expected).into()); - if expected != value.0.to_immediate() { + let expected = ScalarMaybeUndef::from(Scalar::from_bool(*expected)); + if expected != self.ecx.read_scalar(value.0).unwrap() { // poison all places this operand references so that further code // doesn't use the invalid value match cond { @@ -633,20 +631,20 @@ impl<'b, 'a, 'tcx> Visitor<'tcx> for ConstPropagator<'b, 'a, 'tcx> { let len = self .eval_operand(len, source_info) .expect("len must be const"); - let len = match len.0.to_immediate() { - Value::Scalar(ScalarMaybeUndef::Scalar(Scalar::Bits { + let len = match self.ecx.read_scalar(len.0) { + Ok(ScalarMaybeUndef::Scalar(Scalar::Bits { bits, .. })) => bits, - _ => bug!("const len not primitive: {:?}", len), + other => bug!("const len not primitive: {:?}", other), }; let index = self .eval_operand(index, source_info) .expect("index must be const"); - let index = match index.0.to_immediate() { - Value::Scalar(ScalarMaybeUndef::Scalar(Scalar::Bits { + let index = match self.ecx.read_scalar(index.0) { + Ok(ScalarMaybeUndef::Scalar(Scalar::Bits { bits, .. })) => bits, - _ => bug!("const index not primitive: {:?}", index), + other => bug!("const index not primitive: {:?}", other), }; format!( "index out of bounds: \ diff --git a/src/librustc_mir/transform/elaborate_drops.rs b/src/librustc_mir/transform/elaborate_drops.rs index 958d4e353c..2b3fd552e0 100644 --- a/src/librustc_mir/transform/elaborate_drops.rs +++ b/src/librustc_mir/transform/elaborate_drops.rs @@ -16,6 +16,7 @@ use dataflow::{drop_flag_effects_for_location, on_lookup_result_bits}; use dataflow::MoveDataParamEnv; use dataflow::{self, do_dataflow, DebugFormatted}; use rustc::ty::{self, TyCtxt}; +use rustc::ty::layout::VariantIdx; use rustc::mir::*; use rustc::util::nodemap::FxHashMap; use rustc_data_structures::bit_set::BitSet; @@ -282,7 +283,7 @@ impl<'a, 'b, 'tcx> DropElaborator<'a, 'tcx> for Elaborator<'a, 'b, 'tcx> { }) } - fn downcast_subpath(&self, path: Self::Path, variant: usize) -> Option { + fn downcast_subpath(&self, path: Self::Path, variant: VariantIdx) -> Option { dataflow::move_path_children_matching(self.ctxt.move_data(), path, |p| { match p { &Projection { @@ -495,7 +496,7 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> { let target = self.patch.new_block(BasicBlockData { statements: vec![assign], terminator: Some(Terminator { - kind: TerminatorKind::Goto { target: target }, + kind: TerminatorKind::Goto { target }, ..*terminator }), is_cleanup: false, diff --git a/src/librustc_mir/transform/erase_regions.rs b/src/librustc_mir/transform/erase_regions.rs index c697391d86..a5b5a7e86d 100644 --- a/src/librustc_mir/transform/erase_regions.rs +++ b/src/librustc_mir/transform/erase_regions.rs @@ -12,7 +12,7 @@ //! We want to do this once just before codegen, so codegen does not have to take //! care erasing regions all over the place. //! NOTE: We do NOT erase regions of statements that are relevant for -//! "types-as-contracts"-validation, namely, AcquireValid, ReleaseValid, and EndRegion. +//! 
"types-as-contracts"-validation, namely, AcquireValid, ReleaseValid use rustc::ty::subst::Substs; use rustc::ty::{self, Ty, TyCtxt}; @@ -22,23 +22,19 @@ use transform::{MirPass, MirSource}; struct EraseRegionsVisitor<'a, 'tcx: 'a> { tcx: TyCtxt<'a, 'tcx, 'tcx>, - in_validation_statement: bool, } impl<'a, 'tcx> EraseRegionsVisitor<'a, 'tcx> { pub fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Self { EraseRegionsVisitor { tcx, - in_validation_statement: false, } } } impl<'a, 'tcx> MutVisitor<'tcx> for EraseRegionsVisitor<'a, 'tcx> { fn visit_ty(&mut self, ty: &mut Ty<'tcx>, _: TyContext) { - if !self.in_validation_statement { - *ty = self.tcx.erase_regions(ty); - } + *ty = self.tcx.erase_regions(ty); self.super_ty(ty); } @@ -58,20 +54,7 @@ impl<'a, 'tcx> MutVisitor<'tcx> for EraseRegionsVisitor<'a, 'tcx> { block: BasicBlock, statement: &mut Statement<'tcx>, location: Location) { - // Do NOT delete EndRegion if validation statements are emitted. - // Validation needs EndRegion. - if self.tcx.sess.opts.debugging_opts.mir_emit_validate == 0 { - if let StatementKind::EndRegion(_) = statement.kind { - statement.kind = StatementKind::Nop; - } - } - - self.in_validation_statement = match statement.kind { - StatementKind::Validate(..) => true, - _ => false, - }; self.super_statement(block, statement, location); - self.in_validation_statement = false; } } diff --git a/src/librustc_mir/transform/generator.rs b/src/librustc_mir/transform/generator.rs index 5889fabee9..24cc4cce74 100644 --- a/src/librustc_mir/transform/generator.rs +++ b/src/librustc_mir/transform/generator.rs @@ -64,6 +64,7 @@ use rustc::hir::def_id::DefId; use rustc::mir::*; use rustc::mir::visit::{PlaceContext, Visitor, MutVisitor}; use rustc::ty::{self, TyCtxt, AdtDef, Ty}; +use rustc::ty::layout::VariantIdx; use rustc::ty::subst::Substs; use util::dump_mir; use util::liveness::{self, IdentityMap}; @@ -158,7 +159,7 @@ struct TransformVisitor<'a, 'tcx: 'a> { impl<'a, 'tcx> TransformVisitor<'a, 'tcx> { // Make a GeneratorState rvalue - fn make_state(&self, idx: usize, val: Operand<'tcx>) -> Rvalue<'tcx> { + fn make_state(&self, idx: VariantIdx, val: Operand<'tcx>) -> Rvalue<'tcx> { let adt = AggregateKind::Adt(self.state_adt_ref, idx, self.state_substs, None, None); Rvalue::Aggregate(box adt, vec![val]) } @@ -229,11 +230,11 @@ impl<'a, 'tcx> MutVisitor<'tcx> for TransformVisitor<'a, 'tcx> { }); let ret_val = match data.terminator().kind { - TerminatorKind::Return => Some((1, + TerminatorKind::Return => Some((VariantIdx::new(1), None, Operand::Move(Place::Local(self.new_ret_local)), None)), - TerminatorKind::Yield { ref value, resume, drop } => Some((0, + TerminatorKind::Yield { ref value, resume, drop } => Some((VariantIdx::new(0), Some(resume), value.clone(), drop)), @@ -684,6 +685,13 @@ fn create_generator_drop_shim<'a, 'tcx>( is_block_tail: None, is_user_variable: None, }; + if tcx.sess.opts.debugging_opts.mir_emit_retag { + // Alias tracking must know we changed the type + mir.basic_blocks_mut()[START_BLOCK].statements.insert(0, Statement { + source_info, + kind: StatementKind::EscapeToRaw(Operand::Copy(Place::Local(self_arg()))), + }) + } no_landing_pads(tcx, &mut mir); diff --git a/src/librustc_mir/transform/inline.rs b/src/librustc_mir/transform/inline.rs index 5963f1a481..1cce0de515 100644 --- a/src/librustc_mir/transform/inline.rs +++ b/src/librustc_mir/transform/inline.rs @@ -19,7 +19,7 @@ use rustc_data_structures::indexed_vec::{Idx, IndexVec}; use rustc::mir::*; use rustc::mir::visit::*; -use rustc::ty::{self, Instance, 
InstanceDef, Ty, TyCtxt}; +use rustc::ty::{self, Instance, InstanceDef, ParamEnv, Ty, TyCtxt}; use rustc::ty::subst::{Subst,Substs}; use std::collections::VecDeque; @@ -85,39 +85,16 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> { // Only do inlining into fn bodies. let id = self.tcx.hir.as_local_node_id(self.source.def_id).unwrap(); let body_owner_kind = self.tcx.hir.body_owner_kind(id); + if let (hir::BodyOwnerKind::Fn, None) = (body_owner_kind, self.source.promoted) { for (bb, bb_data) in caller_mir.basic_blocks().iter_enumerated() { - // Don't inline calls that are in cleanup blocks. - if bb_data.is_cleanup { continue; } - - // Only consider direct calls to functions - let terminator = bb_data.terminator(); - if let TerminatorKind::Call { - func: ref op, .. } = terminator.kind { - if let ty::FnDef(callee_def_id, substs) = op.ty(caller_mir, self.tcx).sty { - if let Some(instance) = Instance::resolve(self.tcx, - param_env, - callee_def_id, - substs) { - let is_virtual = - if let InstanceDef::Virtual(..) = instance.def { - true - } else { - false - }; - - if !is_virtual { - callsites.push_back(CallSite { - callee: instance.def_id(), - substs: instance.substs, - bb, - location: terminator.source_info - }); - } - } - } - } + if let Some(callsite) = self.get_valid_function_call(bb, + bb_data, + caller_mir, + param_env) { + callsites.push_back(callsite); + } } } else { return; @@ -137,7 +114,7 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> { let callee_mir = match self.tcx.try_optimized_mir(callsite.location.span, callsite.callee) { - Ok(callee_mir) if self.should_inline(callsite, callee_mir) => { + Ok(callee_mir) if self.consider_optimizing(callsite, callee_mir) => { self.tcx.subst_and_normalize_erasing_regions( &callsite.substs, param_env, @@ -163,20 +140,13 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> { // Add callsites from inlined function for (bb, bb_data) in caller_mir.basic_blocks().iter_enumerated().skip(start) { - // Only consider direct calls to functions - let terminator = bb_data.terminator(); - if let TerminatorKind::Call { - func: Operand::Constant(ref f), .. } = terminator.kind { - if let ty::FnDef(callee_def_id, substs) = f.ty.sty { - // Don't inline the same function multiple times. - if callsite.callee != callee_def_id { - callsites.push_back(CallSite { - callee: callee_def_id, - substs, - bb, - location: terminator.source_info - }); - } + if let Some(new_callsite) = self.get_valid_function_call(bb, + bb_data, + caller_mir, + param_env) { + // Don't inline the same function multiple times. + if callsite.callee != new_callsite.callee { + callsites.push_back(new_callsite); } } } @@ -198,6 +168,52 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> { } } + fn get_valid_function_call(&self, + bb: BasicBlock, + bb_data: &BasicBlockData<'tcx>, + caller_mir: &Mir<'tcx>, + param_env: ParamEnv<'tcx>, + ) -> Option> { + // Don't inline calls that are in cleanup blocks. + if bb_data.is_cleanup { return None; } + + // Only consider direct calls to functions + let terminator = bb_data.terminator(); + if let TerminatorKind::Call { func: ref op, .. } = terminator.kind { + if let ty::FnDef(callee_def_id, substs) = op.ty(caller_mir, self.tcx).sty { + let instance = Instance::resolve(self.tcx, + param_env, + callee_def_id, + substs)?; + + if let InstanceDef::Virtual(..) 
= instance.def { + return None; + } + + return Some(CallSite { + callee: instance.def_id(), + substs: instance.substs, + bb, + location: terminator.source_info + }); + } + } + + None + } + + fn consider_optimizing(&self, + callsite: CallSite<'tcx>, + callee_mir: &Mir<'tcx>) + -> bool + { + debug!("consider_optimizing({:?})", callsite); + self.should_inline(callsite, callee_mir) + && self.tcx.consider_optimizing(|| format!("Inline {:?} into {:?}", + callee_mir.span, + callsite)) + } + fn should_inline(&self, callsite: CallSite<'tcx>, callee_mir: &Mir<'tcx>) @@ -691,6 +707,14 @@ impl<'a, 'tcx> MutVisitor<'tcx> for Integrator<'a, 'tcx> { self.in_cleanup_block = false; } + fn visit_retag(&mut self, fn_entry: &mut bool, place: &mut Place<'tcx>, loc: Location) { + self.super_retag(fn_entry, place, loc); + + // We have to patch all inlined retags to be aware that they are no longer + // happening on function entry. + *fn_entry = false; + } + fn visit_terminator_kind(&mut self, block: BasicBlock, kind: &mut TerminatorKind<'tcx>, loc: Location) { self.super_terminator_kind(block, kind, loc); diff --git a/src/librustc_mir/transform/lower_128bit.rs b/src/librustc_mir/transform/lower_128bit.rs index bd7d9d3676..8007215316 100644 --- a/src/librustc_mir/transform/lower_128bit.rs +++ b/src/librustc_mir/transform/lower_128bit.rs @@ -143,7 +143,7 @@ fn check_lang_item_type<'a, 'tcx, D>( { let did = tcx.require_lang_item(lang_item); let poly_sig = tcx.fn_sig(did); - let sig = poly_sig.no_late_bound_regions().unwrap(); + let sig = poly_sig.no_bound_vars().unwrap(); let lhs_ty = lhs.ty(local_decls, tcx); let rhs_ty = rhs.ty(local_decls, tcx); let place_ty = place.ty(local_decls, tcx).to_ty(tcx); diff --git a/src/librustc_mir/transform/mod.rs b/src/librustc_mir/transform/mod.rs index d18836999d..a77e9b502a 100644 --- a/src/librustc_mir/transform/mod.rs +++ b/src/librustc_mir/transform/mod.rs @@ -11,7 +11,7 @@ use borrow_check::nll::type_check; use build; use rustc::hir::def_id::{CrateNum, DefId, LOCAL_CRATE}; -use rustc::mir::{Mir, Promoted}; +use rustc::mir::{Mir, MirPhase, Promoted}; use rustc::ty::TyCtxt; use rustc::ty::query::Providers; use rustc::ty::steal::Steal; @@ -23,7 +23,7 @@ use std::borrow::Cow; use syntax::ast; use syntax_pos::Span; -pub mod add_validation; +pub mod add_retag; pub mod add_moves_for_packed_drops; pub mod cleanup_post_borrowck; pub mod check_unsafety; @@ -72,7 +72,7 @@ fn mir_keys<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, krate: CrateNum) -> Lrc { assert_eq!(krate, LOCAL_CRATE); - let mut set = DefIdSet(); + let mut set = DefIdSet::default(); // All body-owners have MIR associated with them. 
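As an aside, the inliner change above pulls the per-block eligibility test out of the main loop into `get_valid_function_call`, which returns `Option<CallSite>` and bails out on cleanup blocks, indirect calls, and virtual instances. A self-contained toy version of that shape (simplified `Block`/`CallSite` types standing in for rustc's MIR, not the real API) might look like:

```rust
// Toy model of the refactor: the "is this a call we may inline?" check is a
// helper returning Option, so the caller can just filter_map over blocks.
#[derive(Debug)]
struct CallSite { callee: usize, block: usize }

struct Block { is_cleanup: bool, direct_call_to: Option<usize>, is_virtual: bool }

fn get_valid_function_call(block_idx: usize, block: &Block) -> Option<CallSite> {
    // Don't inline calls that are in cleanup blocks.
    if block.is_cleanup { return None; }
    // Only consider direct calls...
    let callee = block.direct_call_to?;
    // ...and never virtual (trait-object) calls.
    if block.is_virtual { return None; }
    Some(CallSite { callee, block: block_idx })
}

fn main() {
    let blocks = vec![
        Block { is_cleanup: false, direct_call_to: Some(7), is_virtual: false },
        Block { is_cleanup: true,  direct_call_to: Some(8), is_virtual: false },
        Block { is_cleanup: false, direct_call_to: None,    is_virtual: false },
    ];
    let callsites: Vec<CallSite> = blocks
        .iter()
        .enumerate()
        .filter_map(|(i, b)| get_valid_function_call(i, b))
        .collect();
    println!("{:?}", callsites); // only the first block yields a candidate
}
```

The same helper is reused when rescanning blocks added by an inlined body, which is what lets the duplicated pattern-matching in the original loop disappear.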
set.extend(tcx.body_owners()); @@ -155,53 +155,66 @@ pub trait MirPass { mir: &mut Mir<'tcx>); } -pub macro run_passes($tcx:ident, $mir:ident, $def_id:ident, $suite_index:expr; $($pass:expr,)*) {{ - let suite_index: usize = $suite_index; - let run_passes = |mir: &mut _, promoted| { +pub fn run_passes( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + mir: &mut Mir<'tcx>, + def_id: DefId, + mir_phase: MirPhase, + passes: &[&dyn MirPass], +) { + let phase_index = mir_phase.phase_index(); + + let run_passes = |mir: &mut Mir<'tcx>, promoted| { + if mir.phase >= mir_phase { + return; + } + let source = MirSource { - def_id: $def_id, - promoted + def_id, + promoted, }; let mut index = 0; let mut run_pass = |pass: &dyn MirPass| { let run_hooks = |mir: &_, index, is_after| { - dump_mir::on_mir_pass($tcx, &format_args!("{:03}-{:03}", suite_index, index), + dump_mir::on_mir_pass(tcx, &format_args!("{:03}-{:03}", phase_index, index), &pass.name(), source, mir, is_after); }; run_hooks(mir, index, false); - pass.run_pass($tcx, source, mir); + pass.run_pass(tcx, source, mir); run_hooks(mir, index, true); index += 1; }; - $(run_pass(&$pass);)* + + for pass in passes { + run_pass(*pass); + } + + mir.phase = mir_phase; }; - run_passes(&mut $mir, None); + run_passes(mir, None); - for (index, promoted_mir) in $mir.promoted.iter_enumerated_mut() { + for (index, promoted_mir) in mir.promoted.iter_enumerated_mut() { run_passes(promoted_mir, Some(index)); - // Let's make sure we don't miss any nested instances - assert!(promoted_mir.promoted.is_empty()); + //Let's make sure we don't miss any nested instances + assert!(promoted_mir.promoted.is_empty()) } -}} +} fn mir_const<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> &'tcx Steal> { // Unsafety check uses the raw mir, so make sure it is run let _ = tcx.unsafety_check_result(def_id); let mut mir = tcx.mir_built(def_id).steal(); - run_passes![tcx, mir, def_id, 0; - // Remove all `EndRegion` statements that are not involved in borrows. - cleanup_post_borrowck::CleanEndRegions, - + run_passes(tcx, &mut mir, def_id, MirPhase::Const, &[ // What we need to do constant evaluation. - simplify::SimplifyCfg::new("initial"), - type_check::TypeckMir, - rustc_peek::SanityCheck, - uniform_array_move_out::UniformArrayMoveOut, - ]; + &simplify::SimplifyCfg::new("initial"), + &type_check::TypeckMir, + &rustc_peek::SanityCheck, + &uniform_array_move_out::UniformArrayMoveOut, + ]); tcx.alloc_steal_mir(mir) } @@ -214,11 +227,11 @@ fn mir_validated<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> &'tcx } let mut mir = tcx.mir_const(def_id).steal(); - run_passes![tcx, mir, def_id, 1; + run_passes(tcx, &mut mir, def_id, MirPhase::Validated, &[ // What we need to run borrowck etc. 
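The `run_passes` rework above turns the old macro into a plain function keyed on `MirPhase`: a body that has already reached the requested phase is skipped, and the phase is recorded once the listed passes have run. A minimal stand-alone sketch of that phase-gating idea, with toy `Body`/`Pass` types in place of rustc's `Mir`/`MirPass` (promoted sub-bodies and dump hooks omitted):

```rust
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
enum Phase { Const, Validated, Optimized }

struct Body { phase: Phase, text: String }

trait Pass {
    fn name(&self) -> &'static str;
    fn run(&self, body: &mut Body);
}

struct Simplify;
impl Pass for Simplify {
    fn name(&self) -> &'static str { "simplify" }
    fn run(&self, body: &mut Body) { body.text.push_str(" [simplified]"); }
}

// Run `passes` on `body`, but only if it has not yet reached `target`.
fn run_passes(body: &mut Body, target: Phase, passes: &[&dyn Pass]) {
    if body.phase >= target {
        return; // already at (or past) this phase: nothing to do
    }
    for pass in passes {
        println!("running {}", pass.name());
        pass.run(body);
    }
    body.phase = target; // record that this phase is done
}

fn main() {
    let mut body = Body { phase: Phase::Const, text: "fn main() {}".into() };
    run_passes(&mut body, Phase::Validated, &[&Simplify]);
    // A second call with the same target is a no-op thanks to the phase check.
    run_passes(&mut body, Phase::Validated, &[&Simplify]);
    println!("{:?}: {}", body.phase, body.text);
}
```

Taking `&[&dyn Pass]` instead of a macro-expanded list is what allows the pass pipeline to be an ordinary slice literal in `mir_const`, `mir_validated`, and `optimized_mir`.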
- qualify_consts::QualifyAndPromoteConstants, - simplify::SimplifyCfg::new("qualify-consts"), - ]; + &qualify_consts::QualifyAndPromoteConstants, + &simplify::SimplifyCfg::new("qualify-consts"), + ]); tcx.alloc_steal_mir(mir) } @@ -232,59 +245,61 @@ fn optimized_mir<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> &'tcx } let mut mir = tcx.mir_validated(def_id).steal(); - run_passes![tcx, mir, def_id, 2; + run_passes(tcx, &mut mir, def_id, MirPhase::Optimized, &[ // Remove all things not needed by analysis - no_landing_pads::NoLandingPads, - simplify_branches::SimplifyBranches::new("initial"), - remove_noop_landing_pads::RemoveNoopLandingPads, + &no_landing_pads::NoLandingPads, + &simplify_branches::SimplifyBranches::new("initial"), + &remove_noop_landing_pads::RemoveNoopLandingPads, // Remove all `AscribeUserType` statements. - cleanup_post_borrowck::CleanAscribeUserType, + &cleanup_post_borrowck::CleanAscribeUserType, // Remove all `FakeRead` statements and the borrows that are only // used for checking matches - cleanup_post_borrowck::CleanFakeReadsAndBorrows, - simplify::SimplifyCfg::new("early-opt"), + &cleanup_post_borrowck::CleanFakeReadsAndBorrows, + + &simplify::SimplifyCfg::new("early-opt"), // These next passes must be executed together - add_call_guards::CriticalCallEdges, - elaborate_drops::ElaborateDrops, - no_landing_pads::NoLandingPads, - // AddValidation needs to run after ElaborateDrops and before EraseRegions, and it needs - // an AllCallEdges pass right before it. - add_call_guards::AllCallEdges, - add_validation::AddValidation, + &add_call_guards::CriticalCallEdges, + &elaborate_drops::ElaborateDrops, + &no_landing_pads::NoLandingPads, // AddMovesForPackedDrops needs to run after drop // elaboration. - add_moves_for_packed_drops::AddMovesForPackedDrops, + &add_moves_for_packed_drops::AddMovesForPackedDrops, + // AddRetag needs to run after ElaborateDrops, and it needs + // an AllCallEdges pass right before it. Otherwise it should + // run fairly late, but before optimizations begin. + &add_call_guards::AllCallEdges, + &add_retag::AddRetag, - simplify::SimplifyCfg::new("elaborate-drops"), + &simplify::SimplifyCfg::new("elaborate-drops"), // No lifetime analysis based on borrowing can be done from here on out. // From here on out, regions are gone. - erase_regions::EraseRegions, + &erase_regions::EraseRegions, - lower_128bit::Lower128Bit, + &lower_128bit::Lower128Bit, // Optimizations begin. - uniform_array_move_out::RestoreSubsliceArrayMoveOut, - inline::Inline, + &uniform_array_move_out::RestoreSubsliceArrayMoveOut, + &inline::Inline, // Lowering generator control-flow and variables // has to happen before we do anything else to them. 
- generator::StateTransform, + &generator::StateTransform, - instcombine::InstCombine, - const_prop::ConstProp, - simplify_branches::SimplifyBranches::new("after-const-prop"), - deaggregator::Deaggregator, - copy_prop::CopyPropagation, - remove_noop_landing_pads::RemoveNoopLandingPads, - simplify::SimplifyCfg::new("final"), - simplify::SimplifyLocals, + &instcombine::InstCombine, + &const_prop::ConstProp, + &simplify_branches::SimplifyBranches::new("after-const-prop"), + &deaggregator::Deaggregator, + ©_prop::CopyPropagation, + &remove_noop_landing_pads::RemoveNoopLandingPads, + &simplify::SimplifyCfg::new("final"), + &simplify::SimplifyLocals, - add_call_guards::CriticalCallEdges, - dump_mir::Marker("PreCodegen"), - ]; + &add_call_guards::CriticalCallEdges, + &dump_mir::Marker("PreCodegen"), + ]); tcx.alloc_mir(mir) } diff --git a/src/librustc_mir/transform/promote_consts.rs b/src/librustc_mir/transform/promote_consts.rs index 629211a7b5..c5add62607 100644 --- a/src/librustc_mir/transform/promote_consts.rs +++ b/src/librustc_mir/transform/promote_consts.rs @@ -310,16 +310,11 @@ impl<'a, 'tcx> Promoter<'a, 'tcx> { match statement.kind { StatementKind::Assign(_, box Rvalue::Ref(_, _, ref mut place)) => { // Find the underlying local for this (necessarily interior) borrow. - // HACK(eddyb) using a recursive function because of mutable borrows. - fn interior_base<'a, 'tcx>(place: &'a mut Place<'tcx>) - -> &'a mut Place<'tcx> { - if let Place::Projection(ref mut proj) = *place { - assert_ne!(proj.elem, ProjectionElem::Deref); - return interior_base(&mut proj.base); - } - place - } - let place = interior_base(place); + let mut place = place; + while let Place::Projection(ref mut proj) = *place { + assert_ne!(proj.elem, ProjectionElem::Deref); + place = &mut proj.base; + }; let ty = place.ty(local_decls, self.tcx).to_ty(self.tcx); let span = statement.source_info.span; @@ -338,6 +333,14 @@ impl<'a, 'tcx> Promoter<'a, 'tcx> { let operand = Operand::Copy(promoted_place(ty, span)); mem::replace(&mut args[index], operand) } + // We expected a `TerminatorKind::Call` for which we'd like to promote an + // argument. `qualify_consts` saw a `TerminatorKind::Call` here, but + // we are seeing a `Goto`. That means that the `promote_temps` method + // already promoted this call away entirely. This case occurs when calling + // a function requiring a constant argument and as that constant value + // providing a value whose computation contains another call to a function + // requiring a constant argument. + TerminatorKind::Goto { .. 
} => return, _ => bug!() } } diff --git a/src/librustc_mir/transform/qualify_consts.rs b/src/librustc_mir/transform/qualify_consts.rs index c78fb51961..bcee6d75b5 100644 --- a/src/librustc_mir/transform/qualify_consts.rs +++ b/src/librustc_mir/transform/qualify_consts.rs @@ -243,13 +243,52 @@ impl<'a, 'tcx> Qualifier<'a, 'tcx, 'tcx> { return; } - match *dest { - Place::Local(index) if (self.mir.local_kind(index) == LocalKind::Var || - self.mir.local_kind(index) == LocalKind::Arg) && - self.tcx.sess.features_untracked().const_let => { - debug!("store to var {:?}", index); - self.local_qualif[index] = Some(self.qualif); + if self.tcx.features().const_let { + let mut dest = dest; + let index = loop { + match dest { + // with `const_let` active, we treat all locals equal + Place::Local(index) => break *index, + // projections are transparent for assignments + // we qualify the entire destination at once, even if just a field would have + // stricter qualification + Place::Projection(proj) => { + // Catch more errors in the destination. `visit_place` also checks various + // projection rules like union field access and raw pointer deref + self.visit_place( + dest, + PlaceContext::MutatingUse(MutatingUseContext::Store), + location + ); + dest = &proj.base; + }, + Place::Promoted(..) => bug!("promoteds don't exist yet during promotion"), + Place::Static(..) => { + // Catch more errors in the destination. `visit_place` also checks that we + // do not try to access statics from constants or try to mutate statics + self.visit_place( + dest, + PlaceContext::MutatingUse(MutatingUseContext::Store), + location + ); + return; + } + } + }; + debug!("store to var {:?}", index); + match &mut self.local_qualif[index] { + // this is overly restrictive, because even full assignments do not clear the qualif + // While we could special case full assignments, this would be inconsistent with + // aggregates where we overwrite all fields via assignments, which would not get + // that feature. + Some(ref mut qualif) => *qualif = *qualif | self.qualif, + // insert new qualification + qualif @ None => *qualif = Some(self.qualif), } + return; + } + + match *dest { Place::Local(index) if self.mir.local_kind(index) == LocalKind::Temp || self.mir.local_kind(index) == LocalKind::ReturnPointer => { debug!("store to {:?} (temp or return pointer)", index); @@ -318,7 +357,7 @@ impl<'a, 'tcx> Qualifier<'a, 'tcx, 'tcx> { TerminatorKind::FalseUnwind { .. } => None, TerminatorKind::Return => { - if !self.tcx.sess.features_untracked().const_let { + if !self.tcx.features().const_let { // Check for unused values. This usually means // there are extra statements in the AST. for temp in mir.temps_iter() { @@ -425,7 +464,7 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { LocalKind::ReturnPointer => { self.not_const(); } - LocalKind::Var if !self.tcx.sess.features_untracked().const_let => { + LocalKind::Var if !self.tcx.features().const_let => { if self.mode != Mode::Fn { emit_feature_err(&self.tcx.sess.parse_sess, "const_let", self.span, GateIssue::Language, @@ -478,6 +517,16 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { // Only allow statics (not consts) to refer to other statics. 
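Both the promotion hunk and the `const_let` assignment handling above replace a recursive helper with an iterative walk down a chain of projections to the underlying local. Roughly, with a toy `Place` type standing in for rustc's:

```rust
// Toy stand-in for rustc's `Place`: either a local variable or a projection
// (field access, index, ...) applied to a base place.
enum Place {
    Local(usize),
    Projection(Box<Place>),
}

// Peel projections down to the underlying local, using the same iterative
// `loop`/`match` shape the patch switches to instead of a recursive helper.
fn base_local(mut place: &Place) -> usize {
    loop {
        match place {
            Place::Local(index) => break *index,
            Place::Projection(base) => place = &**base,
        }
    }
}

fn main() {
    // Something like `x.field[i]` bottoming out in local `_3`.
    let place = Place::Projection(Box::new(Place::Projection(Box::new(Place::Local(3)))));
    assert_eq!(base_local(&place), 3);
    println!("base local: _{}", base_local(&place));
}
```

In the real qualification code the loop additionally re-runs `visit_place` on each projection and static it passes, so the usual projection checks still fire on the destination.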
if self.mode == Mode::Static || self.mode == Mode::StaticMut { + if context.is_mutating_use() { + // this is not strictly necessary as miri will also bail out + // For interior mutability we can't really catch this statically as that + // goes through raw pointers and intermediate temporaries, so miri has + // to catch this anyway + self.tcx.sess.span_err( + self.span, + "cannot mutate statics in the initializer of another static", + ); + } return; } self.add(Qualif::NOT_CONST); @@ -509,7 +558,7 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { Mode::Fn => {}, _ => { if let ty::RawPtr(_) = base_ty.sty { - if !this.tcx.sess.features_untracked().const_raw_ptr_deref { + if !this.tcx.features().const_raw_ptr_deref { emit_feature_err( &this.tcx.sess.parse_sess, "const_raw_ptr_deref", this.span, GateIssue::Language, @@ -532,7 +581,7 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { match this.mode { Mode::Fn => this.not_const(), Mode::ConstFn => { - if !this.tcx.sess.features_untracked().const_fn_union { + if !this.tcx.features().const_fn_union { emit_feature_err( &this.tcx.sess.parse_sess, "const_fn_union", this.span, GateIssue::Language, @@ -758,7 +807,7 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { if let Mode::Fn = self.mode { // in normal functions, mark such casts as not promotable self.add(Qualif::NOT_CONST); - } else if !self.tcx.sess.features_untracked().const_raw_ptr_to_usize_cast { + } else if !self.tcx.features().const_raw_ptr_to_usize_cast { // in const fn and constants require the feature gate // FIXME: make it unsafe inside const fn and constants emit_feature_err( @@ -785,7 +834,7 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { if let Mode::Fn = self.mode { // raw pointer operations are not allowed inside promoteds self.add(Qualif::NOT_CONST); - } else if !self.tcx.sess.features_untracked().const_compare_raw_pointers { + } else if !self.tcx.features().const_compare_raw_pointers { // require the feature gate inside constants and const fn // FIXME: make it unsafe to use these operations emit_feature_err( @@ -843,141 +892,163 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { let fn_ty = func.ty(self.mir, self.tcx); let mut callee_def_id = None; - let (mut is_shuffle, mut is_const_fn) = (false, false); - if let ty::FnDef(def_id, _) = fn_ty.sty { - callee_def_id = Some(def_id); - match self.tcx.fn_sig(def_id).abi() { - Abi::RustIntrinsic | - Abi::PlatformIntrinsic => { - assert!(!self.tcx.is_const_fn(def_id)); - match &self.tcx.item_name(def_id).as_str()[..] 
{ - | "size_of" - | "min_align_of" - | "needs_drop" - | "type_id" - | "bswap" - | "bitreverse" - | "ctpop" - | "cttz" - | "cttz_nonzero" - | "ctlz" - | "ctlz_nonzero" - | "overflowing_add" - | "overflowing_sub" - | "overflowing_mul" - | "unchecked_shl" - | "unchecked_shr" - | "add_with_overflow" - | "sub_with_overflow" - | "mul_with_overflow" - // no need to check feature gates, intrinsics are only callable from the - // libstd or with forever unstable feature gates - => is_const_fn = true, - // special intrinsic that can be called diretly without an intrinsic - // feature gate needs a language feature gate - "transmute" => { - // never promote transmute calls - if self.mode != Mode::Fn { - is_const_fn = true; - // const eval transmute calls only with the feature gate - if !self.tcx.sess.features_untracked().const_transmute { - emit_feature_err( - &self.tcx.sess.parse_sess, "const_transmute", - self.span, GateIssue::Language, - &format!("The use of std::mem::transmute() \ - is gated in {}s", self.mode)); + let mut is_shuffle = false; + let mut is_const_fn = false; + let mut is_promotable_const_fn = false; + match fn_ty.sty { + ty::FnDef(def_id, _) => { + callee_def_id = Some(def_id); + match self.tcx.fn_sig(def_id).abi() { + Abi::RustIntrinsic | + Abi::PlatformIntrinsic => { + assert!(!self.tcx.is_const_fn(def_id)); + match &self.tcx.item_name(def_id).as_str()[..] { + | "size_of" + | "min_align_of" + | "needs_drop" + | "type_id" + | "bswap" + | "bitreverse" + | "ctpop" + | "cttz" + | "cttz_nonzero" + | "ctlz" + | "ctlz_nonzero" + | "overflowing_add" + | "overflowing_sub" + | "overflowing_mul" + | "unchecked_shl" + | "unchecked_shr" + | "rotate_left" + | "rotate_right" + | "add_with_overflow" + | "sub_with_overflow" + | "mul_with_overflow" + // no need to check feature gates, intrinsics are only callable + // from the libstd or with forever unstable feature gates + => is_const_fn = true, + // special intrinsic that can be called diretly without an intrinsic + // feature gate needs a language feature gate + "transmute" => { + // never promote transmute calls + if self.mode != Mode::Fn { + is_const_fn = true; + // const eval transmute calls only with the feature gate + if !self.tcx.features().const_transmute { + emit_feature_err( + &self.tcx.sess.parse_sess, "const_transmute", + self.span, GateIssue::Language, + &format!("The use of std::mem::transmute() \ + is gated in {}s", self.mode)); + } } } - } - name if name.starts_with("simd_shuffle") => { - is_shuffle = true; - } - - _ => {} - } - } - _ => { - // in normal functions we only care about promotion - if self.mode == Mode::Fn { - // never promote const fn calls of - // functions without #[rustc_promotable] - if self.tcx.is_promotable_const_fn(def_id) { - is_const_fn = true; - } - } else { - // stable const fn or unstable const fns with their feature gate - // active - if self.tcx.is_const_fn(def_id) { - is_const_fn = true; - } else if self.is_const_panic_fn(def_id) { - // check the const_panic feature gate - // FIXME: cannot allow this inside `allow_internal_unstable` because - // that would make `panic!` insta stable in constants, since the - // macro is marked with the attr - if self.tcx.sess.features_untracked().const_panic { - is_const_fn = true; - } else { - // don't allow panics in constants without the feature gate - emit_feature_err( - &self.tcx.sess.parse_sess, - "const_panic", - self.span, - GateIssue::Language, - &format!("panicking in {}s is unstable", self.mode), - ); + name if name.starts_with("simd_shuffle") => { + 
is_shuffle = true; } - } else if let Some(feature) = self.tcx.is_unstable_const_fn(def_id) { - // check `#[unstable]` const fns or `#[rustc_const_unstable]` - // functions without the feature gate active in this crate to report - // a better error message than the one below - if self.span.allows_unstable() { - // `allow_internal_unstable` can make such calls stable + + _ => {} + } + } + _ => { + // in normal functions we only care about promotion + if self.mode == Mode::Fn { + // never promote const fn calls of + // functions without #[rustc_promotable] + if self.tcx.is_promotable_const_fn(def_id) { + is_const_fn = true; + is_promotable_const_fn = true; + } else if self.tcx.is_const_fn(def_id) { is_const_fn = true; - } else { - let mut err = self.tcx.sess.struct_span_err(self.span, - &format!("`{}` is not yet stable as a const fn", - self.tcx.item_path_str(def_id))); - help!(&mut err, - "in Nightly builds, add `#![feature({})]` \ - to the crate attributes to enable", - feature); - err.emit(); } } else { - // FIXME(#24111) Remove this check when const fn stabilizes - let (msg, note) = if let UnstableFeatures::Disallow = - self.tcx.sess.opts.unstable_features { - (format!("calls in {}s are limited to \ - tuple structs and tuple variants", - self.mode), - Some("a limited form of compile-time function \ - evaluation is available on a nightly \ - compiler via `const fn`")) + // stable const fn or unstable const fns with their feature gate + // active + if self.tcx.is_const_fn(def_id) { + is_const_fn = true; + } else if self.is_const_panic_fn(def_id) { + // check the const_panic feature gate + // FIXME: cannot allow this inside `allow_internal_unstable` + // because that would make `panic!` insta stable in constants, + // since the macro is marked with the attr + if self.tcx.features().const_panic { + is_const_fn = true; + } else { + // don't allow panics in constants without the feature gate + emit_feature_err( + &self.tcx.sess.parse_sess, + "const_panic", + self.span, + GateIssue::Language, + &format!("panicking in {}s is unstable", self.mode), + ); + } + } else if let Some(feat) = self.tcx.is_unstable_const_fn(def_id) { + // check `#[unstable]` const fns or `#[rustc_const_unstable]` + // functions without the feature gate active in this crate to + // report a better error message than the one below + if self.span.allows_unstable() { + // `allow_internal_unstable` can make such calls stable + is_const_fn = true; + } else { + let mut err = self.tcx.sess.struct_span_err(self.span, + &format!("`{}` is not yet stable as a const fn", + self.tcx.item_path_str(def_id))); + help!(&mut err, + "in Nightly builds, add `#![feature({})]` \ + to the crate attributes to enable", + feat); + err.emit(); + } } else { - (format!("calls in {}s are limited \ - to constant functions, \ - tuple structs and tuple variants", - self.mode), - None) - }; - let mut err = struct_span_err!( - self.tcx.sess, - self.span, - E0015, - "{}", - msg, - ); - if let Some(note) = note { - err.span_note(self.span, note); + // FIXME(#24111) Remove this check when const fn stabilizes + let (msg, note) = if let UnstableFeatures::Disallow = + self.tcx.sess.opts.unstable_features { + (format!("calls in {}s are limited to \ + tuple structs and tuple variants", + self.mode), + Some("a limited form of compile-time function \ + evaluation is available on a nightly \ + compiler via `const fn`")) + } else { + (format!("calls in {}s are limited \ + to constant functions, \ + tuple structs and tuple variants", + self.mode), + None) + }; + let mut 
err = struct_span_err!( + self.tcx.sess, + self.span, + E0015, + "{}", + msg, + ); + if let Some(note) = note { + err.span_note(self.span, note); + } + err.emit(); } - err.emit(); } } } + }, + ty::FnPtr(_) => { + if self.mode != Mode::Fn { + let mut err = self.tcx.sess.struct_span_err( + self.span, + &format!("function pointers are not allowed in const fn")); + err.emit(); + } + }, + _ => { + self.not_const(); + return } } + let constant_arguments = callee_def_id.and_then(|id| { args_required_const(self.tcx, id) }); @@ -1006,7 +1077,17 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { if !constant_arguments.contains(&i) { return } - if this.qualif.is_empty() { + // Since the argument is required to be constant, + // we care about constness, not promotability. + // If we checked for promotability, we'd miss out on + // the results of function calls (which are never promoted + // in runtime code) + // This is not a problem, because the argument explicitly + // requests constness, in contrast to regular promotion + // which happens even without the user requesting it. + // We can error out with a hard error if the argument is not + // constant here. + if (this.qualif - Qualif::NOT_PROMOTABLE).is_empty() { debug!("visit_terminator_kind: candidate={:?}", candidate); this.promotion_candidates.push(candidate); } else { @@ -1036,7 +1117,11 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { // Be conservative about the returned value of a const fn. let tcx = self.tcx; let ty = dest.ty(self.mir, tcx).to_ty(tcx); - self.qualif = Qualif::empty(); + if is_const_fn && !is_promotable_const_fn && self.mode == Mode::Fn { + self.qualif = Qualif::NOT_PROMOTABLE; + } else { + self.qualif = Qualif::empty(); + } self.add_type(ty); } self.assign(dest, location); @@ -1088,7 +1173,7 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { if let (Mode::ConstFn, &Place::Local(index)) = (self.mode, dest) { if self.mir.local_kind(index) == LocalKind::Var && self.const_fn_arg_vars.insert(index) && - !self.tcx.sess.features_untracked().const_let { + !self.tcx.features().const_let { // Direct use of an argument is permitted. match *rvalue { @@ -1147,8 +1232,8 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { StatementKind::StorageLive(_) | StatementKind::StorageDead(_) | StatementKind::InlineAsm {..} | - StatementKind::EndRegion(_) | - StatementKind::Validate(..) | + StatementKind::Retag { .. } | + StatementKind::EscapeToRaw { .. } | StatementKind::AscribeUserType(..) | StatementKind::Nop => {} } diff --git a/src/librustc_mir/transform/qualify_min_const_fn.rs b/src/librustc_mir/transform/qualify_min_const_fn.rs index b6f4f4b1de..13e134ba85 100644 --- a/src/librustc_mir/transform/qualify_min_const_fn.rs +++ b/src/librustc_mir/transform/qualify_min_const_fn.rs @@ -241,8 +241,8 @@ fn check_statement( // These are all NOPs | StatementKind::StorageLive(_) | StatementKind::StorageDead(_) - | StatementKind::Validate(..) - | StatementKind::EndRegion(_) + | StatementKind::Retag { .. } + | StatementKind::EscapeToRaw { .. } | StatementKind::AscribeUserType(..) | StatementKind::Nop => Ok(()), } diff --git a/src/librustc_mir/transform/remove_noop_landing_pads.rs b/src/librustc_mir/transform/remove_noop_landing_pads.rs index 4b4b284b02..a31d12baed 100644 --- a/src/librustc_mir/transform/remove_noop_landing_pads.rs +++ b/src/librustc_mir/transform/remove_noop_landing_pads.rs @@ -52,12 +52,9 @@ impl RemoveNoopLandingPads { StatementKind::FakeRead(..) 
| StatementKind::StorageLive(_) | StatementKind::StorageDead(_) | - StatementKind::EndRegion(_) | StatementKind::AscribeUserType(..) | StatementKind::Nop => { - // These are all nops in a landing pad (there's some - // borrowck interaction between EndRegion and storage - // instructions, but this should all run after borrowck). + // These are all nops in a landing pad } StatementKind::Assign(Place::Local(_), box Rvalue::Use(_)) => { @@ -65,10 +62,11 @@ impl RemoveNoopLandingPads { // turn a landing pad to a non-nop } - StatementKind::Assign(_, _) | + StatementKind::Assign { .. } | StatementKind::SetDiscriminant { .. } | StatementKind::InlineAsm { .. } | - StatementKind::Validate { .. } => { + StatementKind::Retag { .. } | + StatementKind::EscapeToRaw { .. } => { return false; } } diff --git a/src/librustc_mir/transform/rustc_peek.rs b/src/librustc_mir/transform/rustc_peek.rs index 05044574e5..f852195b83 100644 --- a/src/librustc_mir/transform/rustc_peek.rs +++ b/src/librustc_mir/transform/rustc_peek.rs @@ -161,8 +161,8 @@ fn each_block<'a, 'tcx, O>(tcx: TyCtxt<'a, 'tcx, 'tcx>, mir::StatementKind::StorageLive(_) | mir::StatementKind::StorageDead(_) | mir::StatementKind::InlineAsm { .. } | - mir::StatementKind::EndRegion(_) | - mir::StatementKind::Validate(..) | + mir::StatementKind::Retag { .. } | + mir::StatementKind::EscapeToRaw { .. } | mir::StatementKind::AscribeUserType(..) | mir::StatementKind::Nop => continue, mir::StatementKind::SetDiscriminant{ .. } => diff --git a/src/librustc_mir/transform/simplify.rs b/src/librustc_mir/transform/simplify.rs index c20d40af50..f643870dec 100644 --- a/src/librustc_mir/transform/simplify.rs +++ b/src/librustc_mir/transform/simplify.rs @@ -302,7 +302,7 @@ impl MirPass for SimplifyLocals { let map = make_local_map(&mut mir.local_decls, marker.locals); // Update references to all vars and tmps now - LocalUpdater { map: map }.visit_mir(mir); + LocalUpdater { map }.visit_mir(mir); mir.local_decls.shrink_to_fit(); } } diff --git a/src/librustc_mir/transform/simplify_branches.rs b/src/librustc_mir/transform/simplify_branches.rs index e14941b8ae..b248980954 100644 --- a/src/librustc_mir/transform/simplify_branches.rs +++ b/src/librustc_mir/transform/simplify_branches.rs @@ -57,7 +57,7 @@ impl MirPass for SimplifyBranches { TerminatorKind::Assert { target, cond: Operand::Constant(ref c), expected, .. } if (c.literal.assert_bool(tcx) == Some(true)) == expected => { - TerminatorKind::Goto { target: target } + TerminatorKind::Goto { target } }, TerminatorKind::FalseEdges { real_target, .. } => { TerminatorKind::Goto { target: real_target } diff --git a/src/librustc_mir/util/alignment.rs b/src/librustc_mir/util/alignment.rs index 8717bd08ae..a96c5dd687 100644 --- a/src/librustc_mir/util/alignment.rs +++ b/src/librustc_mir/util/alignment.rs @@ -30,7 +30,7 @@ pub fn is_disaligned<'a, 'tcx, L>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let ty = place.ty(local_decls, tcx).to_ty(tcx); match tcx.layout_raw(param_env.and(ty)) { - Ok(layout) if layout.align.abi() == 1 => { + Ok(layout) if layout.align.abi.bytes() == 1 => { // if the alignment is 1, the type can't be further // disaligned. 
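The alignment hunk above only changes how the ABI alignment is read (`align.abi.bytes()` instead of `align.abi()`), keeping the existing early-out: a type whose alignment is one byte can never be disaligned, since every address is a multiple of one. A quick stand-alone illustration of that fact using `std::mem` (not rustc's layout machinery):

```rust
use std::mem::{align_of, size_of};

// A 1-byte-aligned type is never "disaligned": any address satisfies it.
fn can_be_disaligned<T>() -> bool {
    align_of::<T>() > 1
}

#[repr(packed)]
#[allow(dead_code)]
struct Packed {
    a: u8,
    b: u32, // normally 4-byte aligned, so inside a packed struct it may be disaligned
}

fn main() {
    println!("u8 can be disaligned:  {}", can_be_disaligned::<u8>());  // false
    println!("u32 can be disaligned: {}", can_be_disaligned::<u32>()); // true
    println!("size_of::<Packed>() = {}", size_of::<Packed>());         // 5: no padding
}
```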
debug!("is_disaligned({:?}) - align = 1", place); diff --git a/src/librustc_mir/util/borrowck_errors.rs b/src/librustc_mir/util/borrowck_errors.rs index 1715086686..ae0483e3c1 100644 --- a/src/librustc_mir/util/borrowck_errors.rs +++ b/src/librustc_mir/util/borrowck_errors.rs @@ -575,7 +575,7 @@ pub trait BorrowckErrors<'cx>: Sized + Copy { OGN = o ); err.span_label(mutate_span, format!("cannot {}", action)); - err.span_label(match_span, format!("value is immutable in match guard")); + err.span_label(match_span, String::from("value is immutable in match guard")); self.cancel_if_wrong_origin(err, o) } diff --git a/src/librustc_mir/util/elaborate_drops.rs b/src/librustc_mir/util/elaborate_drops.rs index d9a1e4a0fd..2c7f337b3b 100644 --- a/src/librustc_mir/util/elaborate_drops.rs +++ b/src/librustc_mir/util/elaborate_drops.rs @@ -14,6 +14,7 @@ use rustc::mir::*; use rustc::middle::lang_items; use rustc::traits::Reveal; use rustc::ty::{self, Ty, TyCtxt}; +use rustc::ty::layout::VariantIdx; use rustc::ty::subst::Substs; use rustc::ty::util::IntTypeExt; use rustc_data_structures::indexed_vec::Idx; @@ -94,7 +95,7 @@ pub trait DropElaborator<'a, 'tcx: 'a> : fmt::Debug { fn field_subpath(&self, path: Self::Path, field: Field) -> Option; fn deref_subpath(&self, path: Self::Path) -> Option; - fn downcast_subpath(&self, path: Self::Path, variant: usize) -> Option; + fn downcast_subpath(&self, path: Self::Path, variant: VariantIdx) -> Option; fn array_subpath(&self, path: Self::Path, index: u32, size: u32) -> Option; } @@ -392,7 +393,7 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> let fields = self.move_paths_for_fields( self.place, self.path, - &adt.variants[0], + &adt.variants[VariantIdx::new(0)], substs ); self.drop_ladder(fields, succ, unwind) @@ -416,7 +417,7 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> let mut have_otherwise = false; - for (variant_index, discr) in adt.discriminants(self.tcx()).enumerate() { + for (variant_index, discr) in adt.discriminants(self.tcx()) { let subpath = self.elaborator.downcast_subpath( self.path, variant_index); if let Some(variant_path) = subpath { @@ -894,7 +895,7 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> let tcx = self.tcx(); let unit_temp = Place::Local(self.new_temp(tcx.mk_unit())); let free_func = tcx.require_lang_item(lang_items::BoxFreeFnLangItem); - let args = adt.variants[0].fields.iter().enumerate().map(|(i, f)| { + let args = adt.variants[VariantIdx::new(0)].fields.iter().enumerate().map(|(i, f)| { let field = Field::new(i); let field_ty = f.ty(self.tcx(), substs); Operand::Move(self.place.clone().field(field, field_ty)) diff --git a/src/librustc_mir/util/liveness.rs b/src/librustc_mir/util/liveness.rs index d16094e823..12c13b8f81 100644 --- a/src/librustc_mir/util/liveness.rs +++ b/src/librustc_mir/util/liveness.rs @@ -204,7 +204,7 @@ pub fn categorize<'tcx>(context: PlaceContext<'tcx>) -> Option { PlaceContext::NonMutatingUse(NonMutatingUseContext::Copy) | PlaceContext::NonMutatingUse(NonMutatingUseContext::Move) | PlaceContext::NonUse(NonUseContext::AscribeUserTy) | - PlaceContext::NonUse(NonUseContext::Validate) => + PlaceContext::MutatingUse(MutatingUseContext::Retag) => Some(DefUse::Use), /////////////////////////////////////////////////////////////////////////// diff --git a/src/librustc_msan/Cargo.toml b/src/librustc_msan/Cargo.toml index 17ec2b9643..29165675a2 100644 --- a/src/librustc_msan/Cargo.toml +++ b/src/librustc_msan/Cargo.toml @@ -15,6 +15,5 @@ cmake = "0.1.18" [dependencies] alloc = { path = "../liballoc" } 
-alloc_system = { path = "../liballoc_system" } core = { path = "../libcore" } compiler_builtins = { path = "../rustc/compiler_builtins_shim" } diff --git a/src/librustc_msan/lib.rs b/src/librustc_msan/lib.rs index 7b845e631f..47f917e40c 100644 --- a/src/librustc_msan/lib.rs +++ b/src/librustc_msan/lib.rs @@ -9,7 +9,6 @@ // except according to those terms. #![sanitizer_runtime] -#![feature(alloc_system)] #![feature(nll)] #![feature(sanitizer_runtime)] #![feature(staged_api)] @@ -17,10 +16,3 @@ #![unstable(feature = "sanitizer_runtime_lib", reason = "internal implementation detail of sanitizers", issue = "0")] - -extern crate alloc_system; - -use alloc_system::System; - -#[global_allocator] -static ALLOC: System = System; diff --git a/src/librustc_passes/ast_validation.rs b/src/librustc_passes/ast_validation.rs index 0e9596244c..b878a330ab 100644 --- a/src/librustc_passes/ast_validation.rs +++ b/src/librustc_passes/ast_validation.rs @@ -403,25 +403,6 @@ impl<'a> Visitor<'a> for AstValidator<'a> { } } } - ItemKind::TraitAlias(Generics { ref params, .. }, ..) => { - for param in params { - match param.kind { - GenericParamKind::Lifetime { .. } => {} - GenericParamKind::Type { ref default, .. } => { - if !param.bounds.is_empty() { - self.err_handler() - .span_err(param.ident.span, "type parameters on the left \ - side of a trait alias cannot be bounded"); - } - if !default.is_none() { - self.err_handler() - .span_err(param.ident.span, "type parameters on the left \ - side of a trait alias cannot have defaults"); - } - } - } - } - } ItemKind::Mod(_) => { // Ensure that `path` attributes on modules are recorded as used (c.f. #35584). attr::first_attr_value_str_by_name(&item.attrs, "path"); @@ -691,5 +672,5 @@ pub fn check_crate(session: &Session, krate: &Crate) { is_banned: false, }, krate); - visit::walk_crate(&mut AstValidator { session: session }, krate) + visit::walk_crate(&mut AstValidator { session }, krate) } diff --git a/src/librustc_passes/mir_stats.rs b/src/librustc_passes/mir_stats.rs index 06c8545aac..fb37f03a1c 100644 --- a/src/librustc_passes/mir_stats.rs +++ b/src/librustc_passes/mir_stats.rs @@ -83,8 +83,8 @@ impl<'a, 'tcx> mir_visit::Visitor<'tcx> for StatCollector<'a, 'tcx> { self.record(match statement.kind { StatementKind::Assign(..) => "StatementKind::Assign", StatementKind::FakeRead(..) => "StatementKind::FakeRead", - StatementKind::EndRegion(..) => "StatementKind::EndRegion", - StatementKind::Validate(..) => "StatementKind::Validate", + StatementKind::Retag { .. } => "StatementKind::Retag", + StatementKind::EscapeToRaw { .. } => "StatementKind::EscapeToRaw", StatementKind::SetDiscriminant { .. } => "StatementKind::SetDiscriminant", StatementKind::StorageLive(..) => "StatementKind::StorageLive", StatementKind::StorageDead(..) 
=> "StatementKind::StorageDead", diff --git a/src/librustc_passes/rvalue_promotion.rs b/src/librustc_passes/rvalue_promotion.rs index 5e9169e86a..099c6df32a 100644 --- a/src/librustc_passes/rvalue_promotion.rs +++ b/src/librustc_passes/rvalue_promotion.rs @@ -84,10 +84,10 @@ fn rvalue_promotable_map<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, tables: &ty::TypeckTables::empty(None), in_fn: false, in_static: false, - mut_rvalue_borrows: NodeSet(), + mut_rvalue_borrows: Default::default(), param_env: ty::ParamEnv::empty(), identity_substs: Substs::empty(), - result: ItemLocalSet(), + result: ItemLocalSet::default(), }; // `def_id` should be a `Body` owner @@ -172,7 +172,7 @@ impl<'a, 'gcx> CheckCrateVisitor<'a, 'gcx> { /// While the `ExprUseVisitor` walks, we will identify which /// expressions are borrowed, and insert their ids into this /// table. Actually, we insert the "borrow-id", which is normally - /// the id of the expession being borrowed: but in the case of + /// the id of the expression being borrowed: but in the case of /// `ref mut` borrows, the `id` of the pattern is /// inserted. Therefore later we remove that entry from the table /// and transfer it over to the value being matched. This will @@ -663,6 +663,7 @@ impl<'a, 'gcx, 'tcx> euv::Delegate<'tcx> for CheckCrateVisitor<'a, 'gcx> { let mut cur = cmt; loop { match cur.cat { + Categorization::ThreadLocal(..) | Categorization::Rvalue(..) => { if loan_cause == euv::MatchDiscriminant { // Ignore the dummy immutable borrow created by EUV. diff --git a/src/librustc_privacy/lib.rs b/src/librustc_privacy/lib.rs index 5f8c7daea6..6d9abbf5af 100644 --- a/src/librustc_privacy/lib.rs +++ b/src/librustc_privacy/lib.rs @@ -969,7 +969,7 @@ impl<'a, 'tcx> TypeVisitor<'tcx> for TypePrivacyVisitor<'a, 'tcx> { Some(poly_projection_predicate.skip_binder() .projection_ty.trait_ref(self.tcx)) } - ty::Predicate::TypeOutlives(..) => None, + ty::Predicate::TypeOutlives(..) | ty::Predicate::RegionOutlives(..) => None, _ => bug!("unexpected predicate: {:?}", predicate), }; if let Some(trait_ref) = trait_ref { @@ -1761,7 +1761,7 @@ fn privacy_access_levels<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, tcx, access_levels: &visitor.access_levels, in_variant: false, - old_error_set: NodeSet(), + old_error_set: Default::default(), }; intravisit::walk_crate(&mut visitor, krate); diff --git a/src/librustc_resolve/build_reduced_graph.rs b/src/librustc_resolve/build_reduced_graph.rs index 715b788ea8..0fa41cb484 100644 --- a/src/librustc_resolve/build_reduced_graph.rs +++ b/src/librustc_resolve/build_reduced_graph.rs @@ -40,7 +40,7 @@ use syntax::ext::base::{MacroKind, SyntaxExtension}; use syntax::ext::base::Determinacy::Undetermined; use syntax::ext::hygiene::Mark; use syntax::ext::tt::macro_rules; -use syntax::feature_gate::is_builtin_attr; +use syntax::feature_gate::{is_builtin_attr, emit_feature_err, GateIssue}; use syntax::parse::token::{self, Token}; use syntax::std_inject::injected_crate_name; use syntax::symbol::keywords; @@ -153,7 +153,7 @@ impl<'a, 'cl> Resolver<'a, 'cl> { let empty_for_self = |prefix: &[Segment]| { prefix.is_empty() || - prefix.len() == 1 && prefix[0].ident.name == keywords::CrateRoot.name() + prefix.len() == 1 && prefix[0].ident.name == keywords::CrateRoot.name() }; match use_tree.kind { ast::UseTreeKind::Simple(rename, ..) 
=> { @@ -344,9 +344,23 @@ impl<'a, 'cl> Resolver<'a, 'cl> { } ItemKind::ExternCrate(orig_name) => { - let crate_id = self.crate_loader.process_extern_crate(item, &self.definitions); - let module = - self.get_module(DefId { krate: crate_id, index: CRATE_DEF_INDEX }); + let module = if orig_name.is_none() && ident.name == keywords::SelfValue.name() { + self.session + .struct_span_err(item.span, "`extern crate self;` requires renaming") + .span_suggestion(item.span, "try", "extern crate self as name;".into()) + .emit(); + return; + } else if orig_name == Some(keywords::SelfValue.name()) { + if !self.session.features_untracked().extern_crate_self { + emit_feature_err(&self.session.parse_sess, "extern_crate_self", item.span, + GateIssue::Language, "`extern crate self` is unstable"); + } + self.graph_root + } else { + let crate_id = self.crate_loader.process_extern_crate(item, &self.definitions); + self.get_module(DefId { krate: crate_id, index: CRATE_DEF_INDEX }) + }; + self.populate_module_if_necessary(module); if injected_crate_name().map_or(false, |name| ident.name == name) { self.injected_crate = Some(module); @@ -768,6 +782,12 @@ impl<'a, 'cl> Resolver<'a, 'cl> { span_err!(self.session, item.span, E0468, "an `extern crate` loading macros must be at the crate root"); } + if let ItemKind::ExternCrate(Some(orig_name)) = item.node { + if orig_name == keywords::SelfValue.name() { + self.session.span_err(attr.span, + "`macro_use` is not supported on `extern crate self`"); + } + } let ill_formed = |span| span_err!(self.session, span, E0466, "bad macro import"); match attr.meta() { Some(meta) => match meta.node { diff --git a/src/librustc_resolve/check_unused.rs b/src/librustc_resolve/check_unused.rs index ddcaf128bf..659ca1f5b9 100644 --- a/src/librustc_resolve/check_unused.rs +++ b/src/librustc_resolve/check_unused.rs @@ -162,7 +162,7 @@ pub fn check_crate(resolver: &mut Resolver, krate: &ast::Crate) { let mut visitor = UnusedImportCheckVisitor { resolver, - unused_imports: NodeMap(), + unused_imports: Default::default(), base_id: ast::DUMMY_NODE_ID, item_span: DUMMY_SP, }; diff --git a/src/librustc_resolve/lib.rs b/src/librustc_resolve/lib.rs index b5290bc82b..fdac1e3b81 100644 --- a/src/librustc_resolve/lib.rs +++ b/src/librustc_resolve/lib.rs @@ -549,9 +549,9 @@ impl<'a> PathSource<'a> { match self { PathSource::Type => match def { Def::Struct(..) | Def::Union(..) | Def::Enum(..) | - Def::Trait(..) | Def::TyAlias(..) | Def::AssociatedTy(..) | - Def::PrimTy(..) | Def::TyParam(..) | Def::SelfTy(..) | - Def::Existential(..) | + Def::Trait(..) | Def::TraitAlias(..) | Def::TyAlias(..) | + Def::AssociatedTy(..) | Def::PrimTy(..) | Def::TyParam(..) | + Def::SelfTy(..) | Def::Existential(..) | Def::ForeignTy(..) 
=> true, _ => false, }, @@ -1760,13 +1760,8 @@ impl<'a, 'crateloader> Resolver<'a, 'crateloader> { let segments = &path.segments; let path = Segment::from_path(&path); // FIXME (Manishearth): Intra doc links won't get warned of epoch changes - let def = match self.resolve_path_without_parent_scope( - &path, - Some(namespace), - true, - span, - CrateLint::No, - ) { + let def = match self.resolve_path_without_parent_scope(&path, Some(namespace), true, + span, CrateLint::No) { PathResult::Module(ModuleOrUniformRoot::Module(module)) => module.def().unwrap(), PathResult::NonModule(path_res) if path_res.unresolved_segments() == 0 => @@ -1879,22 +1874,22 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { primitive_type_table: PrimitiveTypeTable::new(), - def_map: NodeMap(), - import_map: NodeMap(), - freevars: NodeMap(), - freevars_seen: NodeMap(), + def_map: Default::default(), + import_map: Default::default(), + freevars: Default::default(), + freevars_seen: Default::default(), export_map: FxHashMap::default(), - trait_map: NodeMap(), + trait_map: Default::default(), module_map, - block_map: NodeMap(), + block_map: Default::default(), extern_module_map: FxHashMap::default(), binding_parent_modules: FxHashMap::default(), make_glob_map: make_glob_map == MakeGlobMap::Yes, - glob_map: NodeMap(), + glob_map: Default::default(), used_imports: FxHashSet::default(), - maybe_unused_trait_imports: NodeSet(), + maybe_unused_trait_imports: Default::default(), maybe_unused_extern_crates: Vec::new(), unused_labels: FxHashMap::default(), @@ -1924,7 +1919,7 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { name_already_seen: FxHashMap::default(), whitelisted_legacy_custom_derives: Vec::new(), potentially_unused_imports: Vec::new(), - struct_constructors: DefIdMap(), + struct_constructors: Default::default(), found_unresolved_macro: false, unused_macros: FxHashSet::default(), current_type_ascription: Vec::new(), @@ -2378,13 +2373,9 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { self.with_current_self_item(item, |this| { this.with_type_parameter_rib(HasTypeParameters(generics, ItemRibKind), |this| { let item_def_id = this.definitions.local_def_id(item.id); - if this.session.features_untracked().self_in_typedefs { - this.with_self_rib(Def::SelfTy(None, Some(item_def_id)), |this| { - visit::walk_item(this, item); - }); - } else { + this.with_self_rib(Def::SelfTy(None, Some(item_def_id)), |this| { visit::walk_item(this, item); - } + }); }); }); } @@ -3190,16 +3181,8 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { if is_self_type(path, ns) { __diagnostic_used!(E0411); err.code(DiagnosticId::Error("E0411".into())); - let available_in = if this.session.features_untracked().self_in_typedefs { - "impls, traits, and type definitions" - } else { - "traits and impls" - }; - err.span_label(span, format!("`Self` is only available in {}", available_in)); - if this.current_self_item.is_some() && nightly_options::is_nightly_build() { - err.help("add #![feature(self_in_typedefs)] to the crate attributes \ - to enable"); - } + err.span_label(span, format!("`Self` is only available in impls, traits, \ + and type definitions")); return (err, Vec::new()); } if is_self_value(path, ns) { @@ -3291,7 +3274,10 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { return (err, candidates); } (Def::TyAlias(..), PathSource::Trait(_)) => { - err.span_label(span, "type aliases cannot be used for traits"); + err.span_label(span, "type aliases cannot be used as traits"); + if 
nightly_options::is_nightly_build() { + err.note("did you mean to use a trait alias?"); + } return (err, candidates); } (Def::Mod(..), PathSource::Expr(Some(parent))) => match parent.node { @@ -4055,7 +4041,7 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { // report an error. if record_used { resolve_error(self, span, - ResolutionError::CannotCaptureDynamicEnvironmentInFnItem); + ResolutionError::CannotCaptureDynamicEnvironmentInFnItem); } return Def::Err; } @@ -4063,7 +4049,7 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { // Still doesn't deal with upvars if record_used { resolve_error(self, span, - ResolutionError::AttemptToUseNonConstantValueInConstant); + ResolutionError::AttemptToUseNonConstantValueInConstant); } return Def::Err; } @@ -4587,7 +4573,7 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { // declared as public (due to pruning, we don't explore // outside crate private modules => no need to check this) if !in_module_is_extern || name_binding.vis == ty::Visibility::Public { - candidates.push(ImportSuggestion { path: path }); + candidates.push(ImportSuggestion { path }); } } } @@ -4684,7 +4670,7 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { span: name_binding.span, segments: path_segments, }; - result = Some((module, ImportSuggestion { path: path })); + result = Some((module, ImportSuggestion { path })); } else { // add the module to the lookup if seen_modules.insert(module.def_id().unwrap()) { @@ -5184,7 +5170,7 @@ fn show_candidates(err: &mut DiagnosticBuilder, err.span_suggestions_with_applicability( span, &msg, - path_strings, + path_strings.into_iter(), Applicability::Unspecified, ); } else { diff --git a/src/librustc_resolve/resolve_imports.rs b/src/librustc_resolve/resolve_imports.rs index 1aa40e3b6c..e7cd32f4e8 100644 --- a/src/librustc_resolve/resolve_imports.rs +++ b/src/librustc_resolve/resolve_imports.rs @@ -636,7 +636,7 @@ impl<'a, 'b:'a, 'c: 'b> ImportResolver<'a, 'b, 'c> { let mut errors = false; let mut seen_spans = FxHashSet::default(); let mut error_vec = Vec::new(); - let mut prev_root_id: NodeId = NodeId::new(0); + let mut prev_root_id: NodeId = NodeId::from_u32(0); for i in 0 .. self.determined_imports.len() { let import = self.determined_imports[i]; if let Some((span, err, note)) = self.finalize_import(import) { diff --git a/src/librustc_save_analysis/json_dumper.rs b/src/librustc_save_analysis/json_dumper.rs index d2354f38e2..ca336ceb38 100644 --- a/src/librustc_save_analysis/json_dumper.rs +++ b/src/librustc_save_analysis/json_dumper.rs @@ -71,7 +71,7 @@ impl<'b> JsonDumper> { config: Config, ) -> JsonDumper> { JsonDumper { - output: CallbackOutput { callback: callback }, + output: CallbackOutput { callback }, config: config.clone(), result: Analysis::new(config), } diff --git a/src/librustc_target/Cargo.toml b/src/librustc_target/Cargo.toml index bb686e914a..dfdd7f0ae5 100644 --- a/src/librustc_target/Cargo.toml +++ b/src/librustc_target/Cargo.toml @@ -12,7 +12,5 @@ crate-type = ["dylib"] bitflags = "1.0" log = "0.4" rustc_cratesio_shim = { path = "../librustc_cratesio_shim" } +rustc_data_structures = { path = "../librustc_data_structures" } serialize = { path = "../libserialize" } - -[features] -jemalloc = [] diff --git a/src/librustc_target/README.md b/src/librustc_target/README.md index f5b1acb192..a22000ea9d 100644 --- a/src/librustc_target/README.md +++ b/src/librustc_target/README.md @@ -3,4 +3,4 @@ specific to different compilation targets and so forth. 
For more information about how rustc works, see the [rustc guide]. -[rustc guide]: https://rust-lang-nursery.github.io/rustc-guide/ +[rustc guide]: https://rust-lang.github.io/rustc-guide/ diff --git a/src/librustc_target/abi/call/aarch64.rs b/src/librustc_target/abi/call/aarch64.rs index 90b5b97b51..b4d393749c 100644 --- a/src/librustc_target/abi/call/aarch64.rs +++ b/src/librustc_target/abi/call/aarch64.rs @@ -11,7 +11,7 @@ use abi::call::{FnType, ArgType, Reg, RegKind, Uniform}; use abi::{HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods}; -fn is_homogeneous_aggregate<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>) +fn is_homogeneous_aggregate<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>) -> Option where Ty: TyLayoutMethods<'a, C> + Copy, C: LayoutOf> + HasDataLayout @@ -41,7 +41,7 @@ fn is_homogeneous_aggregate<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>) }) } -fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType<'a, Ty>) +fn classify_ret_ty<'a, Ty, C>(cx: &C, ret: &mut ArgType<'a, Ty>) where Ty: TyLayoutMethods<'a, C> + Copy, C: LayoutOf> + HasDataLayout { @@ -75,7 +75,7 @@ fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType<'a, Ty>) ret.make_indirect(); } -fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>) +fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>) where Ty: TyLayoutMethods<'a, C> + Copy, C: LayoutOf> + HasDataLayout { @@ -109,7 +109,7 @@ fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>) arg.make_indirect(); } -pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<'a, Ty>) +pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<'a, Ty>) where Ty: TyLayoutMethods<'a, C> + Copy, C: LayoutOf> + HasDataLayout { diff --git a/src/librustc_target/abi/call/amdgpu.rs b/src/librustc_target/abi/call/amdgpu.rs index 62462f04d8..85789d7d4d 100644 --- a/src/librustc_target/abi/call/amdgpu.rs +++ b/src/librustc_target/abi/call/amdgpu.rs @@ -11,21 +11,21 @@ use abi::call::{ArgType, FnType, }; use abi::{HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods}; -fn classify_ret_ty<'a, Ty, C>(_tuncx: C, ret: &mut ArgType<'a, Ty>) +fn classify_ret_ty<'a, Ty, C>(_cx: &C, ret: &mut ArgType<'a, Ty>) where Ty: TyLayoutMethods<'a, C> + Copy, C: LayoutOf> + HasDataLayout { ret.extend_integer_width_to(32); } -fn classify_arg_ty<'a, Ty, C>(_cx: C, arg: &mut ArgType<'a, Ty>) +fn classify_arg_ty<'a, Ty, C>(_cx: &C, arg: &mut ArgType<'a, Ty>) where Ty: TyLayoutMethods<'a, C> + Copy, C: LayoutOf> + HasDataLayout { arg.extend_integer_width_to(32); } -pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<'a, Ty>) +pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<'a, Ty>) where Ty: TyLayoutMethods<'a, C> + Copy, C: LayoutOf> + HasDataLayout { diff --git a/src/librustc_target/abi/call/arm.rs b/src/librustc_target/abi/call/arm.rs index 249aad2d93..bf497c09bd 100644 --- a/src/librustc_target/abi/call/arm.rs +++ b/src/librustc_target/abi/call/arm.rs @@ -12,7 +12,7 @@ use abi::call::{Conv, FnType, ArgType, Reg, RegKind, Uniform}; use abi::{HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods}; use spec::HasTargetSpec; -fn is_homogeneous_aggregate<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>) +fn is_homogeneous_aggregate<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>) -> Option where Ty: TyLayoutMethods<'a, C> + Copy, C: LayoutOf> + HasDataLayout @@ -42,7 +42,7 @@ fn is_homogeneous_aggregate<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>) }) } -fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType<'a, Ty>, vfp: bool) +fn classify_ret_ty<'a, Ty, C>(cx: &C, 
ret: &mut ArgType<'a, Ty>, vfp: bool) where Ty: TyLayoutMethods<'a, C> + Copy, C: LayoutOf> + HasDataLayout { @@ -77,7 +77,7 @@ fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType<'a, Ty>, vfp: bool) ret.make_indirect(); } -fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>, vfp: bool) +fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>, vfp: bool) where Ty: TyLayoutMethods<'a, C> + Copy, C: LayoutOf> + HasDataLayout { @@ -93,7 +93,7 @@ fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>, vfp: bool) } } - let align = arg.layout.align.abi(); + let align = arg.layout.align.abi.bytes(); let total = arg.layout.size; arg.cast_to(Uniform { unit: if align <= 4 { Reg::i32() } else { Reg::i64() }, @@ -101,7 +101,7 @@ fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>, vfp: bool) }); } -pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<'a, Ty>) +pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<'a, Ty>) where Ty: TyLayoutMethods<'a, C> + Copy, C: LayoutOf> + HasDataLayout + HasTargetSpec { diff --git a/src/librustc_target/abi/call/asmjs.rs b/src/librustc_target/abi/call/asmjs.rs index 81d6f7b134..3bd2594bdb 100644 --- a/src/librustc_target/abi/call/asmjs.rs +++ b/src/librustc_target/abi/call/asmjs.rs @@ -16,7 +16,7 @@ use abi::{HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods}; // See the https://github.com/kripken/emscripten-fastcomp-clang repository. // The class `EmscriptenABIInfo` in `/lib/CodeGen/TargetInfo.cpp` contains the ABI definitions. -fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType<'a, Ty>) +fn classify_ret_ty<'a, Ty, C>(cx: &C, ret: &mut ArgType<'a, Ty>) where Ty: TyLayoutMethods<'a, C> + Copy, C: LayoutOf> + HasDataLayout { @@ -42,7 +42,7 @@ fn classify_arg_ty(arg: &mut ArgType) { } } -pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<'a, Ty>) +pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<'a, Ty>) where Ty: TyLayoutMethods<'a, C> + Copy, C: LayoutOf> + HasDataLayout { diff --git a/src/librustc_target/abi/call/mips.rs b/src/librustc_target/abi/call/mips.rs index 1e8af52e3e..abe0bd0789 100644 --- a/src/librustc_target/abi/call/mips.rs +++ b/src/librustc_target/abi/call/mips.rs @@ -11,7 +11,7 @@ use abi::call::{ArgType, FnType, Reg, Uniform}; use abi::{HasDataLayout, LayoutOf, Size, TyLayoutMethods}; -fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType, offset: &mut Size) +fn classify_ret_ty<'a, Ty, C>(cx: &C, ret: &mut ArgType, offset: &mut Size) where Ty: TyLayoutMethods<'a, C>, C: LayoutOf + HasDataLayout { if !ret.layout.is_aggregate() { @@ -22,29 +22,29 @@ fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType, offset: &mut Size) } } -fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType, offset: &mut Size) +fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType, offset: &mut Size) where Ty: TyLayoutMethods<'a, C>, C: LayoutOf + HasDataLayout { let dl = cx.data_layout(); let size = arg.layout.size; - let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align); + let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align).abi; if arg.layout.is_aggregate() { arg.cast_to(Uniform { unit: Reg::i32(), total: size }); - if !offset.is_abi_aligned(align) { + if !offset.is_aligned(align) { arg.pad_with(Reg::i32()); } } else { arg.extend_integer_width_to(32); } - *offset = offset.abi_align(align) + size.abi_align(align); + *offset = offset.align_to(align) + size.align_to(align); } -pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType) +pub fn compute_abi_info<'a, Ty, 
C>(cx: &C, fty: &mut FnType) where Ty: TyLayoutMethods<'a, C>, C: LayoutOf + HasDataLayout { let mut offset = Size::ZERO; diff --git a/src/librustc_target/abi/call/mips64.rs b/src/librustc_target/abi/call/mips64.rs index 8e2dd99696..d375b16316 100644 --- a/src/librustc_target/abi/call/mips64.rs +++ b/src/librustc_target/abi/call/mips64.rs @@ -27,7 +27,7 @@ fn extend_integer_width_mips(arg: &mut ArgType, bits: u64) { arg.extend_integer_width_to(bits); } -fn float_reg<'a, Ty, C>(cx: C, ret: &ArgType<'a, Ty>, i: usize) -> Option +fn float_reg<'a, Ty, C>(cx: &C, ret: &ArgType<'a, Ty>, i: usize) -> Option where Ty: TyLayoutMethods<'a, C> + Copy, C: LayoutOf> + HasDataLayout { @@ -41,7 +41,7 @@ fn float_reg<'a, Ty, C>(cx: C, ret: &ArgType<'a, Ty>, i: usize) -> Option } } -fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType<'a, Ty>) +fn classify_ret_ty<'a, Ty, C>(cx: &C, ret: &mut ArgType<'a, Ty>) where Ty: TyLayoutMethods<'a, C> + Copy, C: LayoutOf> + HasDataLayout { @@ -83,7 +83,7 @@ fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType<'a, Ty>) } } -fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>) +fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>) where Ty: TyLayoutMethods<'a, C> + Copy, C: LayoutOf> + HasDataLayout { @@ -118,9 +118,9 @@ fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>) // We only care about aligned doubles if let abi::Abi::Scalar(ref scalar) = field.abi { if let abi::Float(abi::FloatTy::F64) = scalar.value { - if offset.is_abi_aligned(dl.f64_align) { + if offset.is_aligned(dl.f64_align.abi) { // Insert enough integers to cover [last_offset, offset) - assert!(last_offset.is_abi_aligned(dl.f64_align)); + assert!(last_offset.is_aligned(dl.f64_align.abi)); for _ in 0..((offset - last_offset).bits() / 64) .min((prefix.len() - prefix_index) as u64) { @@ -151,7 +151,7 @@ fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>) }); } -pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<'a, Ty>) +pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<'a, Ty>) where Ty: TyLayoutMethods<'a, C> + Copy, C: LayoutOf> + HasDataLayout { diff --git a/src/librustc_target/abi/call/mod.rs b/src/librustc_target/abi/call/mod.rs index 788497a378..489bb37fc2 100644 --- a/src/librustc_target/abi/call/mod.rs +++ b/src/librustc_target/abi/call/mod.rs @@ -137,28 +137,28 @@ impl Reg { } impl Reg { - pub fn align(&self, cx: C) -> Align { + pub fn align(&self, cx: &C) -> Align { let dl = cx.data_layout(); match self.kind { RegKind::Integer => { match self.size.bits() { - 1 => dl.i1_align, - 2..=8 => dl.i8_align, - 9..=16 => dl.i16_align, - 17..=32 => dl.i32_align, - 33..=64 => dl.i64_align, - 65..=128 => dl.i128_align, + 1 => dl.i1_align.abi, + 2..=8 => dl.i8_align.abi, + 9..=16 => dl.i16_align.abi, + 17..=32 => dl.i32_align.abi, + 33..=64 => dl.i64_align.abi, + 65..=128 => dl.i128_align.abi, _ => panic!("unsupported integer: {:?}", self) } } RegKind::Float => { match self.size.bits() { - 32 => dl.f32_align, - 64 => dl.f64_align, + 32 => dl.f32_align.abi, + 64 => dl.f64_align.abi, _ => panic!("unsupported float: {:?}", self) } } - RegKind::Vector => dl.vector_align(self.size) + RegKind::Vector => dl.vector_align(self.size).abi, } } } @@ -188,7 +188,7 @@ impl From for Uniform { } impl Uniform { - pub fn align(&self, cx: C) -> Align { + pub fn align(&self, cx: &C) -> Align { self.unit.align(cx) } } @@ -225,15 +225,15 @@ impl CastTarget { } } - pub fn size(&self, cx: C) -> Size { + pub fn size(&self, cx: &C) -> Size { (self.prefix_chunk * 
self.prefix.iter().filter(|x| x.is_some()).count() as u64) - .abi_align(self.rest.align(cx)) + self.rest.total + .align_to(self.rest.align(cx)) + self.rest.total } - pub fn align(&self, cx: C) -> Align { + pub fn align(&self, cx: &C) -> Align { self.prefix.iter() .filter_map(|x| x.map(|kind| Reg { kind, size: self.prefix_chunk }.align(cx))) - .fold(cx.data_layout().aggregate_align.max(self.rest.align(cx)), + .fold(cx.data_layout().aggregate_align.abi.max(self.rest.align(cx)), |acc, align| acc.max(align)) } } @@ -249,8 +249,8 @@ impl<'a, Ty> TyLayout<'a, Ty> { } } - fn homogeneous_aggregate(&self, cx: C) -> Option - where Ty: TyLayoutMethods<'a, C> + Copy, C: LayoutOf + Copy + fn homogeneous_aggregate(&self, cx: &C) -> Option + where Ty: TyLayoutMethods<'a, C> + Copy, C: LayoutOf { match self.abi { Abi::Uninhabited => None, @@ -369,7 +369,7 @@ impl<'a, Ty> ArgType<'a, Ty> { attrs.pointee_size = self.layout.size; // FIXME(eddyb) We should be doing this, but at least on // i686-pc-windows-msvc, it results in wrong stack offsets. - // attrs.pointee_align = Some(self.layout.align); + // attrs.pointee_align = Some(self.layout.align.abi); let extra_attrs = if self.layout.is_unsized() { Some(ArgAttributes::new()) @@ -483,7 +483,7 @@ pub struct FnType<'a, Ty> { } impl<'a, Ty> FnType<'a, Ty> { - pub fn adjust_for_cabi(&mut self, cx: C, abi: ::spec::abi::Abi) -> Result<(), String> + pub fn adjust_for_cabi(&mut self, cx: &C, abi: ::spec::abi::Abi) -> Result<(), String> where Ty: TyLayoutMethods<'a, C> + Copy, C: LayoutOf> + HasDataLayout + HasTargetSpec { diff --git a/src/librustc_target/abi/call/powerpc.rs b/src/librustc_target/abi/call/powerpc.rs index 3be3034143..a71f322632 100644 --- a/src/librustc_target/abi/call/powerpc.rs +++ b/src/librustc_target/abi/call/powerpc.rs @@ -11,7 +11,7 @@ use abi::call::{ArgType, FnType, Reg, Uniform}; use abi::{HasDataLayout, LayoutOf, Size, TyLayoutMethods}; -fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType, offset: &mut Size) +fn classify_ret_ty<'a, Ty, C>(cx: &C, ret: &mut ArgType, offset: &mut Size) where Ty: TyLayoutMethods<'a, C>, C: LayoutOf + HasDataLayout { if !ret.layout.is_aggregate() { @@ -22,29 +22,29 @@ fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType, offset: &mut Size) } } -fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType, offset: &mut Size) +fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType, offset: &mut Size) where Ty: TyLayoutMethods<'a, C>, C: LayoutOf + HasDataLayout { let dl = cx.data_layout(); let size = arg.layout.size; - let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align); + let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align).abi; if arg.layout.is_aggregate() { arg.cast_to(Uniform { unit: Reg::i32(), total: size }); - if !offset.is_abi_aligned(align) { + if !offset.is_aligned(align) { arg.pad_with(Reg::i32()); } } else { arg.extend_integer_width_to(32); } - *offset = offset.abi_align(align) + size.abi_align(align); + *offset = offset.align_to(align) + size.align_to(align); } -pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType) +pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType) where Ty: TyLayoutMethods<'a, C>, C: LayoutOf + HasDataLayout { let mut offset = Size::ZERO; diff --git a/src/librustc_target/abi/call/powerpc64.rs b/src/librustc_target/abi/call/powerpc64.rs index 0c5ec77a39..99f07c5702 100644 --- a/src/librustc_target/abi/call/powerpc64.rs +++ b/src/librustc_target/abi/call/powerpc64.rs @@ -13,16 +13,17 @@ // need to be fixed when PowerPC vector support is added. 
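The signature changes above all follow one pattern: the ABI helpers now borrow their layout context (`cx: &C`) instead of taking it by value, so the context type no longer needs to be `Copy` (the `HasDataLayout` trait itself drops that bound later in this patch). A simplified stand-in, where `pointer_size_bits` is a made-up field rather than the real `TargetDataLayout` contents:

```rust
// Simplified stand-ins for the layout-context plumbing touched above.
struct TargetDataLayout {
    pointer_size_bits: u64,
}

trait HasDataLayout {
    fn data_layout(&self) -> &TargetDataLayout;
}

// Mirrors the patch's `impl HasDataLayout for TargetDataLayout`.
impl HasDataLayout for TargetDataLayout {
    fn data_layout(&self) -> &TargetDataLayout {
        self
    }
}

// After this patch the helpers take `cx: &C`, so `C` need not be `Copy`.
fn pointer_size_bits<C: HasDataLayout>(cx: &C) -> u64 {
    cx.data_layout().pointer_size_bits
}

fn main() {
    let dl = TargetDataLayout { pointer_size_bits: 64 };
    assert_eq!(pointer_size_bits(&dl), 64);
}
```

Borrowing the context is the less restrictive choice: callers keep ownership, and non-`Copy` contexts (such as ones holding caches) can implement the trait directly.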
use abi::call::{FnType, ArgType, Reg, RegKind, Uniform}; -use abi::{Align, Endian, HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods}; +use abi::{Endian, HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods}; +use spec::HasTargetSpec; #[derive(Debug, Clone, Copy, PartialEq)] enum ABI { ELFv1, // original ABI used for powerpc64 (big-endian) - ELFv2, // newer ABI used for powerpc64le + ELFv2, // newer ABI used for powerpc64le and musl (both endians) } use self::ABI::*; -fn is_homogeneous_aggregate<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>, abi: ABI) +fn is_homogeneous_aggregate<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>, abi: ABI) -> Option where Ty: TyLayoutMethods<'a, C> + Copy, C: LayoutOf> + HasDataLayout @@ -52,7 +53,7 @@ fn is_homogeneous_aggregate<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>, abi: AB }) } -fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType<'a, Ty>, abi: ABI) +fn classify_ret_ty<'a, Ty, C>(cx: &C, ret: &mut ArgType<'a, Ty>, abi: ABI) where Ty: TyLayoutMethods<'a, C> + Copy, C: LayoutOf> + HasDataLayout { @@ -75,7 +76,9 @@ fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType<'a, Ty>, abi: ABI) let size = ret.layout.size; let bits = size.bits(); if bits <= 128 { - let unit = if bits <= 8 { + let unit = if cx.data_layout().endian == Endian::Big { + Reg { kind: RegKind::Integer, size } + } else if bits <= 8 { Reg::i8() } else if bits <= 16 { Reg::i16() @@ -95,7 +98,7 @@ fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType<'a, Ty>, abi: ABI) ret.make_indirect(); } -fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>, abi: ABI) +fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>, abi: ABI) where Ty: TyLayoutMethods<'a, C> + Copy, C: LayoutOf> + HasDataLayout { @@ -110,22 +113,15 @@ fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>, abi: ABI) } let size = arg.layout.size; - let (unit, total) = match abi { - ELFv1 => { - // In ELFv1, aggregates smaller than a doubleword should appear in - // the least-significant bits of the parameter doubleword. The rest - // should be padded at their tail to fill out multiple doublewords. - if size.bits() <= 64 { - (Reg { kind: RegKind::Integer, size }, size) - } else { - let align = Align::from_bits(64, 64).unwrap(); - (Reg::i64(), size.abi_align(align)) - } - }, - ELFv2 => { - // In ELFv2, we can just cast directly. - (Reg::i64(), size) - }, + let (unit, total) = if size.bits() <= 64 { + // Aggregates smaller than a doubleword should appear in + // the least-significant bits of the parameter doubleword. + (Reg { kind: RegKind::Integer, size }, size) + } else { + // Aggregates larger than a doubleword should be padded + // at the tail to fill out a whole number of doublewords. 
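The comments above describe how the rewritten `classify_arg_ty` sizes aggregate arguments: up to 64 bits they are passed as a single integer of exactly that size, and anything larger is rounded up to whole 64-bit doublewords. A standalone sketch of that arithmetic in plain `u64` bit counts, assuming a 64-bit ABI alignment for `i64` as on powerpc64:

```rust
// Round a size in bits up to a whole number of 64-bit doublewords.
fn round_up_to_doublewords(size_bits: u64) -> u64 {
    ((size_bits + 63) / 64) * 64
}

/// Returns (unit size, total size) in bits for an aggregate argument,
/// following the shape of the `classify_arg_ty` logic above.
fn aggregate_units(size_bits: u64) -> (u64, u64) {
    if size_bits <= 64 {
        // Small aggregates ride in the least-significant bits of one doubleword.
        (size_bits, size_bits)
    } else {
        // Larger aggregates are padded at the tail to whole doublewords.
        (64, round_up_to_doublewords(size_bits))
    }
}

fn main() {
    assert_eq!(aggregate_units(24), (24, 24));
    assert_eq!(aggregate_units(100), (64, 128));
}
```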
+ let reg_i64 = Reg::i64(); + (reg_i64, size.align_to(reg_i64.align(cx))) }; arg.cast_to(Uniform { @@ -134,13 +130,17 @@ fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>, abi: ABI) }); } -pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<'a, Ty>) +pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<'a, Ty>) where Ty: TyLayoutMethods<'a, C> + Copy, - C: LayoutOf> + HasDataLayout + C: LayoutOf> + HasDataLayout + HasTargetSpec { - let abi = match cx.data_layout().endian { - Endian::Big => ELFv1, - Endian::Little => ELFv2, + let abi = if cx.target_spec().target_env == "musl" { + ELFv2 + } else { + match cx.data_layout().endian { + Endian::Big => ELFv1, + Endian::Little => ELFv2 + } }; if !fty.ret.is_ignore() { diff --git a/src/librustc_target/abi/call/s390x.rs b/src/librustc_target/abi/call/s390x.rs index 37be6ea41c..d6d8ea7191 100644 --- a/src/librustc_target/abi/call/s390x.rs +++ b/src/librustc_target/abi/call/s390x.rs @@ -24,7 +24,7 @@ fn classify_ret_ty<'a, Ty, C>(ret: &mut ArgType) } } -fn is_single_fp_element<'a, Ty, C>(cx: C, layout: TyLayout<'a, Ty>) -> bool +fn is_single_fp_element<'a, Ty, C>(cx: &C, layout: TyLayout<'a, Ty>) -> bool where Ty: TyLayoutMethods<'a, C>, C: LayoutOf> + HasDataLayout { @@ -41,7 +41,7 @@ fn is_single_fp_element<'a, Ty, C>(cx: C, layout: TyLayout<'a, Ty>) -> bool } } -fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>) +fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>) where Ty: TyLayoutMethods<'a, C> + Copy, C: LayoutOf> + HasDataLayout { @@ -67,7 +67,7 @@ fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>) } } -pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<'a, Ty>) +pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<'a, Ty>) where Ty: TyLayoutMethods<'a, C> + Copy, C: LayoutOf> + HasDataLayout { diff --git a/src/librustc_target/abi/call/sparc.rs b/src/librustc_target/abi/call/sparc.rs index 1e8af52e3e..abe0bd0789 100644 --- a/src/librustc_target/abi/call/sparc.rs +++ b/src/librustc_target/abi/call/sparc.rs @@ -11,7 +11,7 @@ use abi::call::{ArgType, FnType, Reg, Uniform}; use abi::{HasDataLayout, LayoutOf, Size, TyLayoutMethods}; -fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType, offset: &mut Size) +fn classify_ret_ty<'a, Ty, C>(cx: &C, ret: &mut ArgType, offset: &mut Size) where Ty: TyLayoutMethods<'a, C>, C: LayoutOf + HasDataLayout { if !ret.layout.is_aggregate() { @@ -22,29 +22,29 @@ fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType, offset: &mut Size) } } -fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType, offset: &mut Size) +fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType, offset: &mut Size) where Ty: TyLayoutMethods<'a, C>, C: LayoutOf + HasDataLayout { let dl = cx.data_layout(); let size = arg.layout.size; - let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align); + let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align).abi; if arg.layout.is_aggregate() { arg.cast_to(Uniform { unit: Reg::i32(), total: size }); - if !offset.is_abi_aligned(align) { + if !offset.is_aligned(align) { arg.pad_with(Reg::i32()); } } else { arg.extend_integer_width_to(32); } - *offset = offset.abi_align(align) + size.abi_align(align); + *offset = offset.align_to(align) + size.align_to(align); } -pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType) +pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType) where Ty: TyLayoutMethods<'a, C>, C: LayoutOf + HasDataLayout { let mut offset = Size::ZERO; diff --git 
a/src/librustc_target/abi/call/sparc64.rs b/src/librustc_target/abi/call/sparc64.rs index a58aebc2ff..a609feb3f5 100644 --- a/src/librustc_target/abi/call/sparc64.rs +++ b/src/librustc_target/abi/call/sparc64.rs @@ -13,7 +13,7 @@ use abi::call::{FnType, ArgType, Reg, RegKind, Uniform}; use abi::{HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods}; -fn is_homogeneous_aggregate<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>) +fn is_homogeneous_aggregate<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>) -> Option where Ty: TyLayoutMethods<'a, C> + Copy, C: LayoutOf> + HasDataLayout @@ -41,7 +41,7 @@ fn is_homogeneous_aggregate<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>) }) } -fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType<'a, Ty>) +fn classify_ret_ty<'a, Ty, C>(cx: &C, ret: &mut ArgType<'a, Ty>) where Ty: TyLayoutMethods<'a, C> + Copy, C: LayoutOf> + HasDataLayout { @@ -69,7 +69,7 @@ fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType<'a, Ty>) ret.make_indirect(); } -fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>) +fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>) where Ty: TyLayoutMethods<'a, C> + Copy, C: LayoutOf> + HasDataLayout { @@ -95,7 +95,7 @@ fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>) }); } -pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<'a, Ty>) +pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<'a, Ty>) where Ty: TyLayoutMethods<'a, C> + Copy, C: LayoutOf> + HasDataLayout { diff --git a/src/librustc_target/abi/call/x86.rs b/src/librustc_target/abi/call/x86.rs index 1dcaafcf77..9a95e5b192 100644 --- a/src/librustc_target/abi/call/x86.rs +++ b/src/librustc_target/abi/call/x86.rs @@ -18,7 +18,7 @@ pub enum Flavor { Fastcall } -fn is_single_fp_element<'a, Ty, C>(cx: C, layout: TyLayout<'a, Ty>) -> bool +fn is_single_fp_element<'a, Ty, C>(cx: &C, layout: TyLayout<'a, Ty>) -> bool where Ty: TyLayoutMethods<'a, C> + Copy, C: LayoutOf> + HasDataLayout { @@ -35,7 +35,7 @@ fn is_single_fp_element<'a, Ty, C>(cx: C, layout: TyLayout<'a, Ty>) -> bool } } -pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<'a, Ty>, flavor: Flavor) +pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<'a, Ty>, flavor: Flavor) where Ty: TyLayoutMethods<'a, C> + Copy, C: LayoutOf> + HasDataLayout + HasTargetSpec { diff --git a/src/librustc_target/abi/call/x86_64.rs b/src/librustc_target/abi/call/x86_64.rs index eade086ec4..f091f80924 100644 --- a/src/librustc_target/abi/call/x86_64.rs +++ b/src/librustc_target/abi/call/x86_64.rs @@ -31,17 +31,17 @@ struct Memory; const LARGEST_VECTOR_SIZE: usize = 512; const MAX_EIGHTBYTES: usize = LARGEST_VECTOR_SIZE / 64; -fn classify_arg<'a, Ty, C>(cx: C, arg: &ArgType<'a, Ty>) +fn classify_arg<'a, Ty, C>(cx: &C, arg: &ArgType<'a, Ty>) -> Result<[Option; MAX_EIGHTBYTES], Memory> where Ty: TyLayoutMethods<'a, C> + Copy, C: LayoutOf> + HasDataLayout { - fn classify<'a, Ty, C>(cx: C, layout: TyLayout<'a, Ty>, + fn classify<'a, Ty, C>(cx: &C, layout: TyLayout<'a, Ty>, cls: &mut [Option], off: Size) -> Result<(), Memory> where Ty: TyLayoutMethods<'a, C> + Copy, C: LayoutOf> + HasDataLayout { - if !off.is_abi_aligned(layout.align) { + if !off.is_aligned(layout.align.abi) { if !layout.is_zst() { return Err(Memory); } @@ -178,7 +178,7 @@ fn cast_target(cls: &[Option], size: Size) -> CastTarget { target } -pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<'a, Ty>) +pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<'a, Ty>) where Ty: TyLayoutMethods<'a, C> + Copy, C: LayoutOf> 
+ HasDataLayout { diff --git a/src/librustc_target/abi/mod.rs b/src/librustc_target/abi/mod.rs index 1a5d2801af..50ce0ad691 100644 --- a/src/librustc_target/abi/mod.rs +++ b/src/librustc_target/abi/mod.rs @@ -13,57 +13,71 @@ pub use self::Primitive::*; use spec::Target; -use std::{cmp, fmt}; +use std::fmt; use std::ops::{Add, Deref, Sub, Mul, AddAssign, Range, RangeInclusive}; +use rustc_data_structures::indexed_vec::{Idx, IndexVec}; + pub mod call; /// Parsed [Data layout](http://llvm.org/docs/LangRef.html#data-layout) /// for a target, which contains everything needed to compute layouts. pub struct TargetDataLayout { pub endian: Endian, - pub i1_align: Align, - pub i8_align: Align, - pub i16_align: Align, - pub i32_align: Align, - pub i64_align: Align, - pub i128_align: Align, - pub f32_align: Align, - pub f64_align: Align, + pub i1_align: AbiAndPrefAlign, + pub i8_align: AbiAndPrefAlign, + pub i16_align: AbiAndPrefAlign, + pub i32_align: AbiAndPrefAlign, + pub i64_align: AbiAndPrefAlign, + pub i128_align: AbiAndPrefAlign, + pub f32_align: AbiAndPrefAlign, + pub f64_align: AbiAndPrefAlign, pub pointer_size: Size, - pub pointer_align: Align, - pub aggregate_align: Align, + pub pointer_align: AbiAndPrefAlign, + pub aggregate_align: AbiAndPrefAlign, /// Alignments for vector types. - pub vector_align: Vec<(Size, Align)> + pub vector_align: Vec<(Size, AbiAndPrefAlign)>, + + pub instruction_address_space: u32, } impl Default for TargetDataLayout { /// Creates an instance of `TargetDataLayout`. fn default() -> TargetDataLayout { + let align = |bits| Align::from_bits(bits).unwrap(); TargetDataLayout { endian: Endian::Big, - i1_align: Align::from_bits(8, 8).unwrap(), - i8_align: Align::from_bits(8, 8).unwrap(), - i16_align: Align::from_bits(16, 16).unwrap(), - i32_align: Align::from_bits(32, 32).unwrap(), - i64_align: Align::from_bits(32, 64).unwrap(), - i128_align: Align::from_bits(32, 64).unwrap(), - f32_align: Align::from_bits(32, 32).unwrap(), - f64_align: Align::from_bits(64, 64).unwrap(), + i1_align: AbiAndPrefAlign::new(align(8)), + i8_align: AbiAndPrefAlign::new(align(8)), + i16_align: AbiAndPrefAlign::new(align(16)), + i32_align: AbiAndPrefAlign::new(align(32)), + i64_align: AbiAndPrefAlign { abi: align(32), pref: align(64) }, + i128_align: AbiAndPrefAlign { abi: align(32), pref: align(64) }, + f32_align: AbiAndPrefAlign::new(align(32)), + f64_align: AbiAndPrefAlign::new(align(64)), pointer_size: Size::from_bits(64), - pointer_align: Align::from_bits(64, 64).unwrap(), - aggregate_align: Align::from_bits(0, 64).unwrap(), + pointer_align: AbiAndPrefAlign::new(align(64)), + aggregate_align: AbiAndPrefAlign { abi: align(0), pref: align(64) }, vector_align: vec![ - (Size::from_bits(64), Align::from_bits(64, 64).unwrap()), - (Size::from_bits(128), Align::from_bits(128, 128).unwrap()) - ] + (Size::from_bits(64), AbiAndPrefAlign::new(align(64))), + (Size::from_bits(128), AbiAndPrefAlign::new(align(128))), + ], + instruction_address_space: 0, } } } impl TargetDataLayout { pub fn parse(target: &Target) -> Result { + // Parse an address space index from a string. + let parse_address_space = |s: &str, cause: &str| { + s.parse::().map_err(|err| { + format!("invalid address space `{}` for `{}` in \"data-layout\": {}", + s, cause, err) + }) + }; + // Parse a bit count from a string. 
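The new `parse_address_space` closure above is one of several small parsers for LLVM data-layout strings; alignment entries are handled similarly, as an ABI value with an optional preferred value. A rough standalone sketch of that `<abi>[:<pref>]` split (the helper name is illustrative and error handling is simplified relative to the closures in the hunk):

```rust
// Split an alignment entry such as "32" or "32:64" (values in bits) into
// (abi, pref); a missing preferred alignment defaults to the ABI alignment.
fn parse_align(spec: &str) -> Result<(u64, u64), String> {
    let mut parts = spec.split(':');
    let abi: u64 = parts
        .next()
        .unwrap_or("")
        .parse()
        .map_err(|e| format!("invalid abi alignment: {}", e))?;
    let pref: u64 = match parts.next() {
        Some(p) => p.parse().map_err(|e| format!("invalid preferred alignment: {}", e))?,
        None => abi,
    };
    Ok((abi, pref))
}

fn main() {
    assert_eq!(parse_align("32"), Ok((32, 32)));
    assert_eq!(parse_align("32:64"), Ok((32, 64)));
}
```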
let parse_bits = |s: &str, kind: &str, cause: &str| { s.parse::().map_err(|err| { @@ -82,11 +96,17 @@ impl TargetDataLayout { if s.is_empty() { return Err(format!("missing alignment for `{}` in \"data-layout\"", cause)); } + let align_from_bits = |bits| { + Align::from_bits(bits).map_err(|err| { + format!("invalid alignment for `{}` in \"data-layout\": {}", + cause, err) + }) + }; let abi = parse_bits(s[0], "alignment", cause)?; let pref = s.get(1).map_or(Ok(abi), |pref| parse_bits(pref, "alignment", cause))?; - Align::from_bits(abi, pref).map_err(|err| { - format!("invalid alignment for `{}` in \"data-layout\": {}", - cause, err) + Ok(AbiAndPrefAlign { + abi: align_from_bits(abi)?, + pref: align_from_bits(pref)?, }) }; @@ -96,6 +116,9 @@ impl TargetDataLayout { match spec.split(':').collect::>()[..] { ["e"] => dl.endian = Endian::Little, ["E"] => dl.endian = Endian::Big, + [p] if p.starts_with("P") => { + dl.instruction_address_space = parse_address_space(&p[1..], "P")? + } ["a", ref a..] => dl.aggregate_align = align(a, "a")?, ["f32", ref a..] => dl.f32_align = align(a, "f32")?, ["f64", ref a..] => dl.f64_align = align(a, "f64")?, @@ -190,7 +213,7 @@ impl TargetDataLayout { } } - pub fn vector_align(&self, vec_size: Size) -> Align { + pub fn vector_align(&self, vec_size: Size) -> AbiAndPrefAlign { for &(size, align) in &self.vector_align { if size == vec_size { return align; @@ -198,23 +221,22 @@ impl TargetDataLayout { } // Default to natural alignment, which is what LLVM does. // That is, use the size, rounded up to a power of 2. - let align = vec_size.bytes().next_power_of_two(); - Align::from_bytes(align, align).unwrap() + AbiAndPrefAlign::new(Align::from_bytes(vec_size.bytes().next_power_of_two()).unwrap()) } } -pub trait HasDataLayout: Copy { +pub trait HasDataLayout { fn data_layout(&self) -> &TargetDataLayout; } -impl<'a> HasDataLayout for &'a TargetDataLayout { +impl HasDataLayout for TargetDataLayout { fn data_layout(&self) -> &TargetDataLayout { self } } /// Endianness of the target, which must match cfg(target-endian). -#[derive(Copy, Clone)] +#[derive(Copy, Clone, PartialEq)] pub enum Endian { Little, Big @@ -255,19 +277,19 @@ impl Size { } #[inline] - pub fn abi_align(self, align: Align) -> Size { - let mask = align.abi() - 1; + pub fn align_to(self, align: Align) -> Size { + let mask = align.bytes() - 1; Size::from_bytes((self.bytes() + mask) & !mask) } #[inline] - pub fn is_abi_aligned(self, align: Align) -> bool { - let mask = align.abi() - 1; + pub fn is_aligned(self, align: Align) -> bool { + let mask = align.bytes() - 1; self.bytes() & mask == 0 } #[inline] - pub fn checked_add(self, offset: Size, cx: C) -> Option { + pub fn checked_add(self, offset: Size, cx: &C) -> Option { let dl = cx.data_layout(); let bytes = self.bytes().checked_add(offset.bytes())?; @@ -280,7 +302,7 @@ impl Size { } #[inline] - pub fn checked_mul(self, count: u64, cx: C) -> Option { + pub fn checked_mul(self, count: u64, cx: &C) -> Option { let dl = cx.data_layout(); let bytes = self.bytes().checked_mul(count)?; @@ -343,78 +365,45 @@ impl AddAssign for Size { } } -/// Alignment of a type in bytes, both ABI-mandated and preferred. -/// Each field is a power of two, giving the alignment a maximum value -/// of 2(28 - 1), which is limited by LLVM to a -/// maximum capacity of 229 or 536870912. -#[derive(Copy, Clone, PartialEq, Eq, Ord, PartialOrd, Hash, Debug, RustcEncodable, RustcDecodable)] +/// Alignment of a type in bytes (always a power of two). 
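The renamed `Size::align_to` and `Size::is_aligned` above rely on the alignment being a power of two, which lets both be plain mask operations. A standalone sketch of that arithmetic on raw byte counts, before the new `Align` definition that follows:

```rust
// `align` must be a power of two, which the real `Align` type guarantees.
fn align_to(bytes: u64, align: u64) -> u64 {
    let mask = align - 1;
    // Round up to the next multiple of `align`.
    (bytes + mask) & !mask
}

fn is_aligned(bytes: u64, align: u64) -> bool {
    // A power-of-two multiple has no bits set below the alignment.
    bytes & (align - 1) == 0
}

fn main() {
    assert_eq!(align_to(13, 8), 16);
    assert!(is_aligned(16, 8));
    assert!(!is_aligned(13, 8));
}
```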
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)] pub struct Align { - abi_pow2: u8, - pref_pow2: u8, + pow2: u8, } impl Align { - pub fn from_bits(abi: u64, pref: u64) -> Result { - Align::from_bytes(Size::from_bits(abi).bytes(), - Size::from_bits(pref).bytes()) + pub fn from_bits(bits: u64) -> Result { + Align::from_bytes(Size::from_bits(bits).bytes()) } - pub fn from_bytes(abi: u64, pref: u64) -> Result { - let log2 = |align: u64| { - // Treat an alignment of 0 bytes like 1-byte alignment. - if align == 0 { - return Ok(0); - } - - let mut bytes = align; - let mut pow: u8 = 0; - while (bytes & 1) == 0 { - pow += 1; - bytes >>= 1; - } - if bytes != 1 { - Err(format!("`{}` is not a power of 2", align)) - } else if pow > 29 { - Err(format!("`{}` is too large", align)) - } else { - Ok(pow) - } - }; - - Ok(Align { - abi_pow2: log2(abi)?, - pref_pow2: log2(pref)?, - }) - } - - pub fn abi(self) -> u64 { - 1 << self.abi_pow2 - } - - pub fn pref(self) -> u64 { - 1 << self.pref_pow2 - } - - pub fn abi_bits(self) -> u64 { - self.abi() * 8 - } - - pub fn pref_bits(self) -> u64 { - self.pref() * 8 - } - - pub fn min(self, other: Align) -> Align { - Align { - abi_pow2: cmp::min(self.abi_pow2, other.abi_pow2), - pref_pow2: cmp::min(self.pref_pow2, other.pref_pow2), + pub fn from_bytes(align: u64) -> Result { + // Treat an alignment of 0 bytes like 1-byte alignment. + if align == 0 { + return Ok(Align { pow2: 0 }); } + + let mut bytes = align; + let mut pow2: u8 = 0; + while (bytes & 1) == 0 { + pow2 += 1; + bytes >>= 1; + } + if bytes != 1 { + return Err(format!("`{}` is not a power of 2", align)); + } + if pow2 > 29 { + return Err(format!("`{}` is too large", align)); + } + + Ok(Align { pow2 }) } - pub fn max(self, other: Align) -> Align { - Align { - abi_pow2: cmp::max(self.abi_pow2, other.abi_pow2), - pref_pow2: cmp::max(self.pref_pow2, other.pref_pow2), - } + pub fn bytes(self) -> u64 { + 1 << self.pow2 + } + + pub fn bits(self) -> u64 { + self.bytes() * 8 } /// Compute the best alignment possible for the given offset @@ -422,10 +411,8 @@ impl Align { /// /// NB: for an offset of `0`, this happens to return `2^64`. pub fn max_for_offset(offset: Size) -> Align { - let pow2 = offset.bytes().trailing_zeros() as u8; Align { - abi_pow2: pow2, - pref_pow2: pow2, + pow2: offset.bytes().trailing_zeros() as u8, } } @@ -436,6 +423,36 @@ impl Align { } } +/// A pair of aligments, ABI-mandated and preferred. +#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)] +pub struct AbiAndPrefAlign { + pub abi: Align, + pub pref: Align, +} + +impl AbiAndPrefAlign { + pub fn new(align: Align) -> AbiAndPrefAlign { + AbiAndPrefAlign { + abi: align, + pref: align, + } + } + + pub fn min(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign { + AbiAndPrefAlign { + abi: self.abi.min(other.abi), + pref: self.pref.min(other.pref), + } + } + + pub fn max(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign { + AbiAndPrefAlign { + abi: self.abi.max(other.abi), + pref: self.pref.max(other.pref), + } + } +} + /// Integers, also used for enum discriminants. #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] pub enum Integer { @@ -457,7 +474,7 @@ impl Integer { } } - pub fn align(self, cx: C) -> Align { + pub fn align(self, cx: &C) -> AbiAndPrefAlign { let dl = cx.data_layout(); match self { @@ -492,12 +509,11 @@ impl Integer { } /// Find the smallest integer with the given alignment. 
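The new `Align` above stores the alignment as a power-of-two exponent capped at 2^29 bytes. A simplified replica of that encoding, using the standard library's `is_power_of_two` and `trailing_zeros` in place of the hand-rolled loop in the hunk:

```rust
// Simplified replica of the single-alignment `Align` introduced above.
#[derive(Copy, Clone, Debug, PartialEq)]
struct Align {
    pow2: u8,
}

impl Align {
    fn from_bytes(align: u64) -> Result<Align, String> {
        // Treat an alignment of 0 bytes like 1-byte alignment.
        if align == 0 {
            return Ok(Align { pow2: 0 });
        }
        if !align.is_power_of_two() {
            return Err(format!("`{}` is not a power of 2", align));
        }
        let pow2 = align.trailing_zeros() as u8;
        if pow2 > 29 {
            return Err(format!("`{}` is too large", align));
        }
        Ok(Align { pow2 })
    }

    fn bytes(self) -> u64 {
        1 << self.pow2
    }
}

fn main() {
    assert_eq!(Align::from_bytes(16).unwrap().bytes(), 16);
    assert!(Align::from_bytes(24).is_err());
}
```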
- pub fn for_abi_align(cx: C, align: Align) -> Option { + pub fn for_align(cx: &C, wanted: Align) -> Option { let dl = cx.data_layout(); - let wanted = align.abi(); for &candidate in &[I8, I16, I32, I64, I128] { - if wanted == candidate.align(dl).abi() && wanted == candidate.size().bytes() { + if wanted == candidate.align(dl).abi && wanted.bytes() == candidate.size().bytes() { return Some(candidate); } } @@ -505,13 +521,12 @@ impl Integer { } /// Find the largest integer with the given alignment or less. - pub fn approximate_abi_align(cx: C, align: Align) -> Integer { + pub fn approximate_align(cx: &C, wanted: Align) -> Integer { let dl = cx.data_layout(); - let wanted = align.abi(); // FIXME(eddyb) maybe include I128 in the future, when it works everywhere. for &candidate in &[I64, I32, I16] { - if wanted >= candidate.align(dl).abi() && wanted >= candidate.size().bytes() { + if wanted >= candidate.align(dl).abi && wanted.bytes() >= candidate.size().bytes() { return candidate; } } @@ -571,7 +586,7 @@ pub enum Primitive { } impl<'a, 'tcx> Primitive { - pub fn size(self, cx: C) -> Size { + pub fn size(self, cx: &C) -> Size { let dl = cx.data_layout(); match self { @@ -582,7 +597,7 @@ impl<'a, 'tcx> Primitive { } } - pub fn align(self, cx: C) -> Align { + pub fn align(self, cx: &C) -> AbiAndPrefAlign { let dl = cx.data_layout(); match self { @@ -642,7 +657,7 @@ impl Scalar { /// Returns the valid range as a `x..y` range. /// /// If `x` and `y` are equal, the range is full, not empty. - pub fn valid_range_exclusive(&self, cx: C) -> Range { + pub fn valid_range_exclusive(&self, cx: &C) -> Range { // For a (max) value of -1, max will be `-1 as usize`, which overflows. // However, that is fine here (it would still represent the full range), // i.e., if the range is everything. @@ -812,11 +827,15 @@ impl Abi { } } +newtype_index! { + pub struct VariantIdx { .. } +} + #[derive(PartialEq, Eq, Hash, Debug)] pub enum Variants { /// Single enum variants, structs/tuples, unions, and all non-ADTs. Single { - index: usize + index: VariantIdx, }, /// General-case enums: for each case there is a struct, and they all have @@ -824,7 +843,7 @@ pub enum Variants { /// at a non-0 offset, after where the tag would go. Tagged { tag: Scalar, - variants: Vec, + variants: IndexVec, }, /// Multiple cases distinguished by a niche (values invalid for a type): @@ -836,11 +855,11 @@ pub enum Variants { /// `None` has a null pointer for the second tuple field, and /// `Some` is the identity function (with a non-null reference). 
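`newtype_index! { pub struct VariantIdx { .. } }` above replaces the bare `usize` variant indices used by `Variants`. A hand-rolled stand-in for roughly what that macro provides (the real macro also wires the type into `Idx` and `IndexVec`, which this sketch omits):

```rust
// Thin wrapper over a numeric index that cannot be mixed up with other
// index spaces; a rough approximation of the generated `VariantIdx`.
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
struct VariantIdx(u32);

impl VariantIdx {
    fn new(idx: usize) -> Self {
        VariantIdx(idx as u32)
    }

    fn index(self) -> usize {
        self.0 as usize
    }
}

fn main() {
    // Mirrors `Variants::Single { index: VariantIdx::new(0) }` above:
    // variant indices get their own type instead of a bare `usize`.
    let dataful_variant = VariantIdx::new(0);
    assert_eq!(dataful_variant.index(), 0);
}
```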
NicheFilling { - dataful_variant: usize, - niche_variants: RangeInclusive, + dataful_variant: VariantIdx, + niche_variants: RangeInclusive, niche: Scalar, niche_start: u128, - variants: Vec, + variants: IndexVec, } } @@ -849,16 +868,16 @@ pub struct LayoutDetails { pub variants: Variants, pub fields: FieldPlacement, pub abi: Abi, - pub align: Align, + pub align: AbiAndPrefAlign, pub size: Size } impl LayoutDetails { - pub fn scalar(cx: C, scalar: Scalar) -> Self { + pub fn scalar(cx: &C, scalar: Scalar) -> Self { let size = scalar.value.size(cx); let align = scalar.value.align(cx); LayoutDetails { - variants: Variants::Single { index: 0 }, + variants: Variants::Single { index: VariantIdx::new(0) }, fields: FieldPlacement::Union(0), abi: Abi::Scalar(scalar), size, @@ -891,20 +910,24 @@ pub trait LayoutOf { type Ty; type TyLayout; - fn layout_of(self, ty: Self::Ty) -> Self::TyLayout; + fn layout_of(&self, ty: Self::Ty) -> Self::TyLayout; } pub trait TyLayoutMethods<'a, C: LayoutOf>: Sized { - fn for_variant(this: TyLayout<'a, Self>, cx: C, variant_index: usize) -> TyLayout<'a, Self>; - fn field(this: TyLayout<'a, Self>, cx: C, i: usize) -> C::TyLayout; + fn for_variant( + this: TyLayout<'a, Self>, + cx: &C, + variant_index: VariantIdx, + ) -> TyLayout<'a, Self>; + fn field(this: TyLayout<'a, Self>, cx: &C, i: usize) -> C::TyLayout; } impl<'a, Ty> TyLayout<'a, Ty> { - pub fn for_variant(self, cx: C, variant_index: usize) -> Self + pub fn for_variant(self, cx: &C, variant_index: VariantIdx) -> Self where Ty: TyLayoutMethods<'a, C>, C: LayoutOf { Ty::for_variant(self, cx, variant_index) } - pub fn field(self, cx: C, i: usize) -> C::TyLayout + pub fn field(self, cx: &C, i: usize) -> C::TyLayout where Ty: TyLayoutMethods<'a, C>, C: LayoutOf { Ty::field(self, cx, i) } @@ -926,8 +949,4 @@ impl<'a, Ty> TyLayout<'a, Ty> { Abi::Aggregate { sized } => sized && self.size.bytes() == 0 } } - - pub fn size_and_align(&self) -> (Size, Align) { - (self.size, self.align) - } } diff --git a/src/librustc_target/lib.rs b/src/librustc_target/lib.rs index 10ba27e38f..813b39de06 100644 --- a/src/librustc_target/lib.rs +++ b/src/librustc_target/lib.rs @@ -22,9 +22,10 @@ html_root_url = "https://doc.rust-lang.org/nightly/")] #![feature(box_syntax)] -#![cfg_attr(stage0, feature(min_const_fn))] #![feature(nll)] +#![feature(rustc_attrs)] #![feature(slice_patterns)] +#![feature(step_trait)] #[macro_use] extern crate bitflags; @@ -37,5 +38,8 @@ extern crate serialize as rustc_serialize; // used by deriving #[allow(unused_extern_crates)] extern crate rustc_cratesio_shim; +#[macro_use] +extern crate rustc_data_structures; + pub mod abi; pub mod spec; diff --git a/src/librustc_target/spec/aarch64_fuchsia.rs b/src/librustc_target/spec/aarch64_fuchsia.rs index 8f7ee11d57..9ef4fe3b3a 100644 --- a/src/librustc_target/spec/aarch64_fuchsia.rs +++ b/src/librustc_target/spec/aarch64_fuchsia.rs @@ -8,7 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use spec::{LinkerFlavor, Target, TargetOptions, TargetResult}; +use spec::{LldFlavor, LinkerFlavor, Target, TargetOptions, TargetResult}; pub fn target() -> TargetResult { let mut base = super::fuchsia_base::opts(); @@ -24,7 +24,7 @@ pub fn target() -> TargetResult { target_os: "fuchsia".to_string(), target_env: String::new(), target_vendor: String::new(), - linker_flavor: LinkerFlavor::Gcc, + linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld), options: TargetOptions { abi_blacklist: super::arm_base::abi_blacklist(), .. 
base diff --git a/src/librustc_target/spec/aarch64_unknown_freebsd.rs b/src/librustc_target/spec/aarch64_unknown_freebsd.rs index 541f0564a0..b120f57192 100644 --- a/src/librustc_target/spec/aarch64_unknown_freebsd.rs +++ b/src/librustc_target/spec/aarch64_unknown_freebsd.rs @@ -14,9 +14,6 @@ pub fn target() -> TargetResult { let mut base = super::freebsd_base::opts(); base.max_atomic_width = Some(128); - // see #36994 - base.exe_allocation_crate = None; - Ok(Target { llvm_target: "aarch64-unknown-freebsd".to_string(), target_endian: "little".to_string(), diff --git a/src/librustc_target/spec/aarch64_unknown_linux_gnu.rs b/src/librustc_target/spec/aarch64_unknown_linux_gnu.rs index 2351d01469..af7ec6a178 100644 --- a/src/librustc_target/spec/aarch64_unknown_linux_gnu.rs +++ b/src/librustc_target/spec/aarch64_unknown_linux_gnu.rs @@ -14,9 +14,6 @@ pub fn target() -> TargetResult { let mut base = super::linux_base::opts(); base.max_atomic_width = Some(128); - // see #36994 - base.exe_allocation_crate = None; - Ok(Target { llvm_target: "aarch64-unknown-linux-gnu".to_string(), target_endian: "little".to_string(), diff --git a/src/librustc_target/spec/aarch64_unknown_linux_musl.rs b/src/librustc_target/spec/aarch64_unknown_linux_musl.rs index 5ab55a076f..e5ca91aabe 100644 --- a/src/librustc_target/spec/aarch64_unknown_linux_musl.rs +++ b/src/librustc_target/spec/aarch64_unknown_linux_musl.rs @@ -14,9 +14,6 @@ pub fn target() -> TargetResult { let mut base = super::linux_musl_base::opts(); base.max_atomic_width = Some(128); - // see #36994 - base.exe_allocation_crate = None; - Ok(Target { llvm_target: "aarch64-unknown-linux-musl".to_string(), target_endian: "little".to_string(), diff --git a/src/librustc_target/spec/apple_base.rs b/src/librustc_target/spec/apple_base.rs index 38b3f2528f..8774c15ff0 100644 --- a/src/librustc_target/spec/apple_base.rs +++ b/src/librustc_target/spec/apple_base.rs @@ -44,7 +44,6 @@ pub fn opts() -> TargetOptions { dll_suffix: ".dylib".to_string(), archive_format: "bsd".to_string(), pre_link_args: LinkArgs::new(), - exe_allocation_crate: super::maybe_jemalloc(), has_elf_tls: version >= (10, 7), abi_return_struct_as_int: true, emit_debug_gdb_scripts: false, diff --git a/src/librustc_target/spec/apple_ios_base.rs b/src/librustc_target/spec/apple_ios_base.rs index 296eaca7c7..e926e4913d 100644 --- a/src/librustc_target/spec/apple_ios_base.rs +++ b/src/librustc_target/spec/apple_ios_base.rs @@ -99,10 +99,6 @@ pub fn opts(arch: Arch) -> Result { pre_link_args, has_elf_tls: false, eliminate_frame_pointer: false, - // The following line is a workaround for jemalloc 4.5 being broken on - // ios. jemalloc 5.0 is supposed to fix this. - // see https://github.com/rust-lang/rust/issues/45262 - exe_allocation_crate: None, .. 
super::apple_base::opts() }) } diff --git a/src/librustc_target/spec/arm_linux_androideabi.rs b/src/librustc_target/spec/arm_linux_androideabi.rs index c5e3385a91..055bca6c34 100644 --- a/src/librustc_target/spec/arm_linux_androideabi.rs +++ b/src/librustc_target/spec/arm_linux_androideabi.rs @@ -14,7 +14,7 @@ pub fn target() -> TargetResult { let mut base = super::android_base::opts(); // https://developer.android.com/ndk/guides/abis.html#armeabi base.features = "+strict-align,+v5te".to_string(); - base.max_atomic_width = Some(64); + base.max_atomic_width = Some(32); Ok(Target { llvm_target: "arm-linux-androideabi".to_string(), diff --git a/src/librustc_target/spec/cloudabi_base.rs b/src/librustc_target/spec/cloudabi_base.rs index 2ffa74e737..fb78cf495e 100644 --- a/src/librustc_target/spec/cloudabi_base.rs +++ b/src/librustc_target/spec/cloudabi_base.rs @@ -38,7 +38,6 @@ pub fn opts() -> TargetOptions { // dynamic linking. tls_model: "local-exec".to_string(), relro_level: RelroLevel::Full, - exe_allocation_crate: super::maybe_jemalloc(), .. Default::default() } } diff --git a/src/librustc_target/spec/dragonfly_base.rs b/src/librustc_target/spec/dragonfly_base.rs index 32eac8663a..a9e317b7cb 100644 --- a/src/librustc_target/spec/dragonfly_base.rs +++ b/src/librustc_target/spec/dragonfly_base.rs @@ -33,7 +33,6 @@ pub fn opts() -> TargetOptions { pre_link_args: args, position_independent_executables: true, relro_level: RelroLevel::Full, - exe_allocation_crate: super::maybe_jemalloc(), .. Default::default() } } diff --git a/src/librustc_target/spec/freebsd_base.rs b/src/librustc_target/spec/freebsd_base.rs index 04b8a6e706..c8a2946da5 100644 --- a/src/librustc_target/spec/freebsd_base.rs +++ b/src/librustc_target/spec/freebsd_base.rs @@ -34,7 +34,6 @@ pub fn opts() -> TargetOptions { position_independent_executables: true, eliminate_frame_pointer: false, // FIXME 43575 relro_level: RelroLevel::Full, - exe_allocation_crate: super::maybe_jemalloc(), abi_return_struct_as_int: true, .. Default::default() } diff --git a/src/librustc_target/spec/fuchsia_base.rs b/src/librustc_target/spec/fuchsia_base.rs index b593b83532..1d0474e1a9 100644 --- a/src/librustc_target/spec/fuchsia_base.rs +++ b/src/librustc_target/spec/fuchsia_base.rs @@ -8,33 +8,31 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use spec::{LinkArgs, LinkerFlavor, TargetOptions}; +use spec::{LldFlavor, LinkArgs, LinkerFlavor, TargetOptions}; use std::default::Default; pub fn opts() -> TargetOptions { - let mut args = LinkArgs::new(); - args.insert(LinkerFlavor::Gcc, vec![ - // We want to be able to strip as much executable code as possible - // from the linker command line, and this flag indicates to the - // linker that it can avoid linking in dynamic libraries that don't - // actually satisfy any symbols up to that point (as with many other - // resolutions the linker does). This option only applies to all - // following libraries so we're sure to pass it as one of the first - // arguments. 
- // FIXME: figure out whether these linker args are desirable - //"-Wl,--as-needed".to_string(), - - // Always enable NX protection when it is available - //"-Wl,-z,noexecstack".to_string(), + let mut pre_link_args = LinkArgs::new(); + pre_link_args.insert(LinkerFlavor::Lld(LldFlavor::Ld), vec![ + "--build-id".to_string(), + "--eh-frame-hdr".to_string(), + "--hash-style=gnu".to_string(), + "-z".to_string(), "rodynamic".to_string(), ]); TargetOptions { + linker: Some("rust-lld".to_owned()), + lld_flavor: LldFlavor::Ld, dynamic_linking: true, executables: true, target_family: Some("unix".to_string()), + is_like_fuchsia: true, linker_is_gnu: true, has_rpath: false, - pre_link_args: args, + pre_link_args: pre_link_args, + pre_link_objects_exe: vec![ + "Scrt1.o".to_string() + ], position_independent_executables: true, has_elf_tls: true, .. Default::default() diff --git a/src/librustc_target/spec/hermit_base.rs b/src/librustc_target/spec/hermit_base.rs index 2a24f771e9..168eac685e 100644 --- a/src/librustc_target/spec/hermit_base.rs +++ b/src/librustc_target/spec/hermit_base.rs @@ -21,7 +21,6 @@ pub fn opts() -> TargetOptions { ]); TargetOptions { - exe_allocation_crate: None, executables: true, has_elf_tls: true, linker_is_gnu: true, diff --git a/src/librustc_target/spec/l4re_base.rs b/src/librustc_target/spec/l4re_base.rs index 4ebc930d48..7932adf3b1 100644 --- a/src/librustc_target/spec/l4re_base.rs +++ b/src/librustc_target/spec/l4re_base.rs @@ -30,7 +30,6 @@ pub fn opts() -> TargetOptions { TargetOptions { executables: true, has_elf_tls: false, - exe_allocation_crate: None, panic_strategy: PanicStrategy::Abort, linker: Some("ld".to_string()), pre_link_args: args, diff --git a/src/librustc_target/spec/linux_base.rs b/src/librustc_target/spec/linux_base.rs index 4a9cd9e2f3..01f65d5736 100644 --- a/src/librustc_target/spec/linux_base.rs +++ b/src/librustc_target/spec/linux_base.rs @@ -36,7 +36,6 @@ pub fn opts() -> TargetOptions { pre_link_args: args, position_independent_executables: true, relro_level: RelroLevel::Full, - exe_allocation_crate: super::maybe_jemalloc(), has_elf_tls: true, .. Default::default() } diff --git a/src/librustc_target/spec/linux_musl_base.rs b/src/librustc_target/spec/linux_musl_base.rs index 7a3f3c2a51..c87f14977c 100644 --- a/src/librustc_target/spec/linux_musl_base.rs +++ b/src/librustc_target/spec/linux_musl_base.rs @@ -24,31 +24,6 @@ pub fn opts() -> TargetOptions { // argument is *not* necessary for normal builds, but it can't hurt! base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-Wl,--eh-frame-hdr".to_string()); - // There's a whole bunch of circular dependencies when dealing with MUSL - // unfortunately. To put this in perspective libc is statically linked to - // liblibc and libunwind is statically linked to libstd: - // - // * libcore depends on `fmod` which is in libc (transitively in liblibc). - // liblibc, however, depends on libcore. - // * compiler-rt has personality symbols that depend on libunwind, but - // libunwind is in libstd which depends on compiler-rt. - // - // Recall that linkers discard libraries and object files as much as - // possible, and with all the static linking and archives flying around with - // MUSL the linker is super aggressively stripping out objects. For example - // the first case has fmod stripped from liblibc (it's in its own object - // file) so it's not there when libcore needs it. 
In the second example all - // the unused symbols from libunwind are stripped (each is in its own object - // file in libstd) before we end up linking compiler-rt which depends on - // those symbols. - // - // To deal with these circular dependencies we just force the compiler to - // link everything as a group, not stripping anything out until everything - // is processed. The linker will still perform a pass to strip out object - // files but it won't do so until all objects/archives have been processed. - base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-Wl,-(".to_string()); - base.post_link_args.insert(LinkerFlavor::Gcc, vec!["-Wl,-)".to_string()]); - // When generating a statically linked executable there's generally some // small setup needed which is listed in these files. These are provided by // a musl toolchain and are linked by default by the `musl-gcc` script. Note diff --git a/src/librustc_target/spec/mips64_unknown_linux_gnuabi64.rs b/src/librustc_target/spec/mips64_unknown_linux_gnuabi64.rs index 1f60d91890..b80b6b561c 100644 --- a/src/librustc_target/spec/mips64_unknown_linux_gnuabi64.rs +++ b/src/librustc_target/spec/mips64_unknown_linux_gnuabi64.rs @@ -28,9 +28,6 @@ pub fn target() -> TargetResult { features: "+mips64r2".to_string(), max_atomic_width: Some(64), - // see #36994 - exe_allocation_crate: None, - ..super::linux_base::opts() }, }) diff --git a/src/librustc_target/spec/mips64el_unknown_linux_gnuabi64.rs b/src/librustc_target/spec/mips64el_unknown_linux_gnuabi64.rs index e42fde8d40..1c835af6e4 100644 --- a/src/librustc_target/spec/mips64el_unknown_linux_gnuabi64.rs +++ b/src/librustc_target/spec/mips64el_unknown_linux_gnuabi64.rs @@ -28,9 +28,6 @@ pub fn target() -> TargetResult { features: "+mips64r2".to_string(), max_atomic_width: Some(64), - // see #36994 - exe_allocation_crate: None, - ..super::linux_base::opts() }, }) diff --git a/src/librustc_target/spec/mips_unknown_linux_gnu.rs b/src/librustc_target/spec/mips_unknown_linux_gnu.rs index 59e15137cf..6331031c9a 100644 --- a/src/librustc_target/spec/mips_unknown_linux_gnu.rs +++ b/src/librustc_target/spec/mips_unknown_linux_gnu.rs @@ -27,9 +27,6 @@ pub fn target() -> TargetResult { features: "+mips32r2,+fpxx,+nooddspreg".to_string(), max_atomic_width: Some(32), - // see #36994 - exe_allocation_crate: None, - ..super::linux_base::opts() }, }) diff --git a/src/librustc_target/spec/mips_unknown_linux_musl.rs b/src/librustc_target/spec/mips_unknown_linux_musl.rs index 8ee399ba56..0b20765172 100644 --- a/src/librustc_target/spec/mips_unknown_linux_musl.rs +++ b/src/librustc_target/spec/mips_unknown_linux_musl.rs @@ -15,8 +15,6 @@ pub fn target() -> TargetResult { base.cpu = "mips32r2".to_string(); base.features = "+mips32r2,+soft-float".to_string(); base.max_atomic_width = Some(32); - // see #36994 - base.exe_allocation_crate = None; base.crt_static_default = false; Ok(Target { llvm_target: "mips-unknown-linux-musl".to_string(), diff --git a/src/librustc_target/spec/mips_unknown_linux_uclibc.rs b/src/librustc_target/spec/mips_unknown_linux_uclibc.rs index 384ab1e413..d3f614c982 100644 --- a/src/librustc_target/spec/mips_unknown_linux_uclibc.rs +++ b/src/librustc_target/spec/mips_unknown_linux_uclibc.rs @@ -27,9 +27,6 @@ pub fn target() -> TargetResult { features: "+mips32r2,+soft-float".to_string(), max_atomic_width: Some(32), - // see #36994 - exe_allocation_crate: None, - ..super::linux_base::opts() }, }) diff --git a/src/librustc_target/spec/mipsel_unknown_linux_gnu.rs 
b/src/librustc_target/spec/mipsel_unknown_linux_gnu.rs index edd29164ca..79ebefa79a 100644 --- a/src/librustc_target/spec/mipsel_unknown_linux_gnu.rs +++ b/src/librustc_target/spec/mipsel_unknown_linux_gnu.rs @@ -28,9 +28,6 @@ pub fn target() -> TargetResult { features: "+mips32r2,+fpxx,+nooddspreg".to_string(), max_atomic_width: Some(32), - // see #36994 - exe_allocation_crate: None, - ..super::linux_base::opts() }, }) diff --git a/src/librustc_target/spec/mipsel_unknown_linux_musl.rs b/src/librustc_target/spec/mipsel_unknown_linux_musl.rs index 1d9378ca1b..042e2b71c3 100644 --- a/src/librustc_target/spec/mipsel_unknown_linux_musl.rs +++ b/src/librustc_target/spec/mipsel_unknown_linux_musl.rs @@ -15,8 +15,6 @@ pub fn target() -> TargetResult { base.cpu = "mips32r2".to_string(); base.features = "+mips32r2,+soft-float".to_string(); base.max_atomic_width = Some(32); - // see #36994 - base.exe_allocation_crate = None; base.crt_static_default = false; Ok(Target { llvm_target: "mipsel-unknown-linux-musl".to_string(), diff --git a/src/librustc_target/spec/mipsel_unknown_linux_uclibc.rs b/src/librustc_target/spec/mipsel_unknown_linux_uclibc.rs index a1db1791bb..8cb5cd3f03 100644 --- a/src/librustc_target/spec/mipsel_unknown_linux_uclibc.rs +++ b/src/librustc_target/spec/mipsel_unknown_linux_uclibc.rs @@ -28,9 +28,6 @@ pub fn target() -> TargetResult { features: "+mips32r2,+soft-float".to_string(), max_atomic_width: Some(32), - // see #36994 - exe_allocation_crate: None, - ..super::linux_base::opts() }, }) diff --git a/src/librustc_target/spec/mod.rs b/src/librustc_target/spec/mod.rs index d43d45f64a..5b8070cbf3 100644 --- a/src/librustc_target/spec/mod.rs +++ b/src/librustc_target/spec/mod.rs @@ -297,7 +297,9 @@ supported_targets! { ("mipsel-unknown-linux-gnu", mipsel_unknown_linux_gnu), ("powerpc-unknown-linux-gnu", powerpc_unknown_linux_gnu), ("powerpc-unknown-linux-gnuspe", powerpc_unknown_linux_gnuspe), + ("powerpc-unknown-linux-musl", powerpc_unknown_linux_musl), ("powerpc64-unknown-linux-gnu", powerpc64_unknown_linux_gnu), + ("powerpc64-unknown-linux-musl", powerpc64_unknown_linux_musl), ("powerpc64le-unknown-linux-gnu", powerpc64le_unknown_linux_gnu), ("powerpc64le-unknown-linux-musl", powerpc64le_unknown_linux_musl), ("s390x-unknown-linux-gnu", s390x_unknown_linux_gnu), @@ -397,6 +399,7 @@ supported_targets! { ("thumbv7m-none-eabi", thumbv7m_none_eabi), ("thumbv7em-none-eabi", thumbv7em_none_eabi), ("thumbv7em-none-eabihf", thumbv7em_none_eabihf), + ("thumbv8m.base-none-eabi", thumbv8m_base_none_eabi), ("msp430-none-elf", msp430_none_elf), @@ -412,6 +415,8 @@ supported_targets! { ("riscv32imac-unknown-none-elf", riscv32imac_unknown_none_elf), ("aarch64-unknown-none", aarch64_unknown_none), + + ("x86_64-fortanix-unknown-sgx", x86_64_fortanix_unknown_sgx), } /// Everything `rustc` knows about how to compile for a specific target. @@ -444,11 +449,11 @@ pub struct Target { pub options: TargetOptions, } -pub trait HasTargetSpec: Copy { +pub trait HasTargetSpec { fn target_spec(&self) -> &Target; } -impl<'a> HasTargetSpec for &'a Target { +impl HasTargetSpec for Target { fn target_spec(&self) -> &Target { self } @@ -556,6 +561,8 @@ pub struct TargetOptions { /// Emscripten toolchain. /// Defaults to false. pub is_like_emscripten: bool, + /// Whether the target toolchain is like Fuchsia's. + pub is_like_fuchsia: bool, /// Whether the linker support GNU-like arguments such as -O. Defaults to false. 
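The target specs in this patch, including the new `is_like_fuchsia` flag above, lean on struct-update syntax: each base overrides a few fields and inherits the rest via `..Default::default()` or `..super::linux_base::opts()`. A standalone sketch with a toy `Opts` type standing in for the real `TargetOptions`:

```rust
// Toy subset of `TargetOptions`; the real struct has dozens of fields.
#[derive(Debug)]
struct Opts {
    linker_is_gnu: bool,
    is_like_fuchsia: bool,
    max_atomic_width: Option<u64>,
}

impl Default for Opts {
    fn default() -> Opts {
        Opts {
            linker_is_gnu: false,
            is_like_fuchsia: false,
            max_atomic_width: None,
        }
    }
}

fn main() {
    // A base like `fuchsia_base::opts()` overrides only the fields it cares
    // about and inherits everything else from the defaults.
    let fuchsia_like = Opts {
        is_like_fuchsia: true,
        linker_is_gnu: true,
        ..Default::default()
    };
    println!("{:?}", fuchsia_like);
}
```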
pub linker_is_gnu: bool, /// The MinGW toolchain has a known issue that prevents it from correctly @@ -596,9 +603,6 @@ pub struct TargetOptions { /// `eh_unwind_resume` lang item. pub custom_unwind_resume: bool, - /// If necessary, a different crate to link exe allocators by default - pub exe_allocation_crate: Option, - /// Flag indicating whether ELF TLS (e.g. #[thread_local]) is available for /// this target. pub has_elf_tls: bool, @@ -686,6 +690,10 @@ pub struct TargetOptions { /// target features. This is `true` by default, and `false` for targets like /// wasm32 where the whole program either has simd or not. pub simd_types_indirect: bool, + + /// If set, have the linker export exactly these symbols, instead of using + /// the usual logic to figure this out from the crate itself. + pub override_export_symbols: Option> } impl Default for TargetOptions { @@ -724,6 +732,7 @@ impl Default for TargetOptions { is_like_android: false, is_like_emscripten: false, is_like_msvc: false, + is_like_fuchsia: false, linker_is_gnu: false, allows_weak_linkage: true, has_rpath: false, @@ -740,7 +749,6 @@ impl Default for TargetOptions { link_env: Vec::new(), archive_format: "gnu".to_string(), custom_unwind_resume: false, - exe_allocation_crate: None, allow_asm: true, has_elf_tls: false, obj_is_bitcode: false, @@ -767,6 +775,7 @@ impl Default for TargetOptions { emit_debug_gdb_scripts: true, requires_uwtable: false, simd_types_indirect: true, + override_export_symbols: None, } } } @@ -902,6 +911,14 @@ impl Target { ) ); } ); + ($key_name:ident, opt_list) => ( { + let name = (stringify!($key_name)).replace("_", "-"); + obj.find(&name[..]).map(|o| o.as_array() + .map(|v| base.options.$key_name = Some(v.iter() + .map(|a| a.as_string().unwrap().to_string()).collect()) + ) + ); + } ); ($key_name:ident, optional) => ( { let name = (stringify!($key_name)).replace("_", "-"); if let Some(o) = obj.find(&name[..]) { @@ -979,7 +996,7 @@ impl Target { key!(is_builtin, bool); key!(linker, optional); - try!(key!(lld_flavor, LldFlavor)); + key!(lld_flavor, LldFlavor)?; key!(pre_link_args, link_args); key!(pre_link_args_crt, link_args); key!(pre_link_objects_exe, list); @@ -1015,24 +1032,24 @@ impl Target { key!(is_like_msvc, bool); key!(is_like_emscripten, bool); key!(is_like_android, bool); + key!(is_like_fuchsia, bool); key!(linker_is_gnu, bool); key!(allows_weak_linkage, bool); key!(has_rpath, bool); key!(no_default_libraries, bool); key!(position_independent_executables, bool); key!(needs_plt, bool); - try!(key!(relro_level, RelroLevel)); + key!(relro_level, RelroLevel)?; key!(archive_format); key!(allow_asm, bool); key!(custom_unwind_resume, bool); - key!(exe_allocation_crate, optional); key!(has_elf_tls, bool); key!(obj_is_bitcode, bool); key!(no_integrated_as, bool); key!(max_atomic_width, Option); key!(min_atomic_width, Option); key!(atomic_cas, bool); - try!(key!(panic_strategy, PanicStrategy)); + key!(panic_strategy, PanicStrategy)?; key!(crt_static_allows_dylibs, bool); key!(crt_static_default, bool); key!(crt_static_respected, bool); @@ -1049,6 +1066,7 @@ impl Target { key!(emit_debug_gdb_scripts, bool); key!(requires_uwtable, bool); key!(simd_types_indirect, bool); + key!(override_export_symbols, opt_list); if let Some(array) = obj.find("abi-blacklist").and_then(Json::as_array) { for name in array.iter().filter_map(|abi| abi.as_string()) { @@ -1225,6 +1243,7 @@ impl ToJson for Target { target_option_val!(is_like_msvc); target_option_val!(is_like_emscripten); target_option_val!(is_like_android); + 
target_option_val!(is_like_fuchsia); target_option_val!(linker_is_gnu); target_option_val!(allows_weak_linkage); target_option_val!(has_rpath); @@ -1235,7 +1254,6 @@ impl ToJson for Target { target_option_val!(archive_format); target_option_val!(allow_asm); target_option_val!(custom_unwind_resume); - target_option_val!(exe_allocation_crate); target_option_val!(has_elf_tls); target_option_val!(obj_is_bitcode); target_option_val!(no_integrated_as); @@ -1259,6 +1277,7 @@ impl ToJson for Target { target_option_val!(emit_debug_gdb_scripts); target_option_val!(requires_uwtable); target_option_val!(simd_types_indirect); + target_option_val!(override_export_symbols); if default.abi_blacklist != self.options.abi_blacklist { d.insert("abi-blacklist".to_string(), self.options.abi_blacklist.iter() @@ -1270,14 +1289,6 @@ impl ToJson for Target { } } -fn maybe_jemalloc() -> Option { - if cfg!(feature = "jemalloc") { - Some("alloc_jemalloc".to_string()) - } else { - None - } -} - /// Either a target triple string or a path to a JSON file. #[derive(PartialEq, Clone, Debug, Hash, RustcEncodable, RustcDecodable)] pub enum TargetTriple { diff --git a/src/librustc_target/spec/msp430_none_elf.rs b/src/librustc_target/spec/msp430_none_elf.rs index 0958a95898..d9ab238449 100644 --- a/src/librustc_target/spec/msp430_none_elf.rs +++ b/src/librustc_target/spec/msp430_none_elf.rs @@ -35,9 +35,14 @@ pub fn target() -> TargetResult { no_integrated_as: true, // There are no atomic CAS instructions available in the MSP430 - // instruction set - max_atomic_width: Some(16), + // instruction set, and the LLVM backend doesn't currently support + // compiler fences so the Atomic* API is missing on this target. + // When the LLVM backend gains support for compile fences uncomment + // the `singlethread: true` line and set `max_atomic_width` to + // `Some(16)`. + max_atomic_width: Some(0), atomic_cas: false, + // singlethread: true, // Because these devices have very little resources having an // unwinder is too onerous so we default to "abort" because the diff --git a/src/librustc_target/spec/powerpc64_unknown_linux_gnu.rs b/src/librustc_target/spec/powerpc64_unknown_linux_gnu.rs index 1959871161..1d0afcd5e0 100644 --- a/src/librustc_target/spec/powerpc64_unknown_linux_gnu.rs +++ b/src/librustc_target/spec/powerpc64_unknown_linux_gnu.rs @@ -20,9 +20,6 @@ pub fn target() -> TargetResult { // for now. https://github.com/rust-lang/rust/pull/43170#issuecomment-315411474 base.relro_level = RelroLevel::Partial; - // see #36994 - base.exe_allocation_crate = None; - Ok(Target { llvm_target: "powerpc64-unknown-linux-gnu".to_string(), target_endian: "big".to_string(), diff --git a/src/librustc_target/spec/powerpc64_unknown_linux_musl.rs b/src/librustc_target/spec/powerpc64_unknown_linux_musl.rs new file mode 100644 index 0000000000..95e95510e1 --- /dev/null +++ b/src/librustc_target/spec/powerpc64_unknown_linux_musl.rs @@ -0,0 +1,32 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
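The new `powerpc64-unknown-linux-musl` target that begins here relies on the ABI selection added to `powerpc64.rs` earlier in this patch: musl always gets ELFv2, while other environments pick the ABI by endianness. A standalone sketch of that decision with toy enums mirroring the hunk's logic:

```rust
#[derive(Debug, PartialEq)]
enum Endian {
    Big,
    Little,
}

#[derive(Debug, PartialEq)]
enum Abi {
    ELFv1,
    ELFv2,
}

// Mirrors the updated `compute_abi_info` check: musl uses ELFv2 on both
// endians, otherwise big-endian stays on ELFv1 and little-endian on ELFv2.
fn select_abi(target_env: &str, endian: Endian) -> Abi {
    if target_env == "musl" {
        Abi::ELFv2
    } else {
        match endian {
            Endian::Big => Abi::ELFv1,
            Endian::Little => Abi::ELFv2,
        }
    }
}

fn main() {
    assert_eq!(select_abi("musl", Endian::Big), Abi::ELFv2);
    assert_eq!(select_abi("gnu", Endian::Big), Abi::ELFv1);
    assert_eq!(select_abi("gnu", Endian::Little), Abi::ELFv2);
}
```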
+
+use spec::{LinkerFlavor, Target, TargetResult};
+
+pub fn target() -> TargetResult {
+    let mut base = super::linux_musl_base::opts();
+    base.cpu = "ppc64".to_string();
+    base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m64".to_string());
+    base.max_atomic_width = Some(64);
+
+    Ok(Target {
+        llvm_target: "powerpc64-unknown-linux-musl".to_string(),
+        target_endian: "big".to_string(),
+        target_pointer_width: "64".to_string(),
+        target_c_int_width: "32".to_string(),
+        data_layout: "E-m:e-i64:64-n32:64".to_string(),
+        arch: "powerpc64".to_string(),
+        target_os: "linux".to_string(),
+        target_env: "musl".to_string(),
+        target_vendor: "unknown".to_string(),
+        linker_flavor: LinkerFlavor::Gcc,
+        options: base,
+    })
+}
diff --git a/src/librustc_target/spec/powerpc64le_unknown_linux_gnu.rs b/src/librustc_target/spec/powerpc64le_unknown_linux_gnu.rs
index 39840692df..01811c5a0c 100644
--- a/src/librustc_target/spec/powerpc64le_unknown_linux_gnu.rs
+++ b/src/librustc_target/spec/powerpc64le_unknown_linux_gnu.rs
@@ -16,9 +16,6 @@ pub fn target() -> TargetResult {
     base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m64".to_string());
     base.max_atomic_width = Some(64);
 
-    // see #36994
-    base.exe_allocation_crate = None;
-
     Ok(Target {
         llvm_target: "powerpc64le-unknown-linux-gnu".to_string(),
         target_endian: "little".to_string(),
diff --git a/src/librustc_target/spec/powerpc64le_unknown_linux_musl.rs b/src/librustc_target/spec/powerpc64le_unknown_linux_musl.rs
index 34ec824122..590c5ba8d5 100644
--- a/src/librustc_target/spec/powerpc64le_unknown_linux_musl.rs
+++ b/src/librustc_target/spec/powerpc64le_unknown_linux_musl.rs
@@ -16,9 +16,6 @@ pub fn target() -> TargetResult {
     base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m64".to_string());
     base.max_atomic_width = Some(64);
 
-    // see #36994
-    base.exe_allocation_crate = None;
-
     Ok(Target {
         llvm_target: "powerpc64le-unknown-linux-musl".to_string(),
         target_endian: "little".to_string(),
diff --git a/src/librustc_target/spec/powerpc_unknown_linux_gnu.rs b/src/librustc_target/spec/powerpc_unknown_linux_gnu.rs
index c05b110a75..99d8d99fbb 100644
--- a/src/librustc_target/spec/powerpc_unknown_linux_gnu.rs
+++ b/src/librustc_target/spec/powerpc_unknown_linux_gnu.rs
@@ -15,9 +15,6 @@ pub fn target() -> TargetResult {
     base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m32".to_string());
     base.max_atomic_width = Some(32);
 
-    // see #36994
-    base.exe_allocation_crate = None;
-
     Ok(Target {
         llvm_target: "powerpc-unknown-linux-gnu".to_string(),
         target_endian: "big".to_string(),
diff --git a/src/librustc_target/spec/powerpc_unknown_linux_gnuspe.rs b/src/librustc_target/spec/powerpc_unknown_linux_gnuspe.rs
index c76c3119c8..9b15b0a5dc 100644
--- a/src/librustc_target/spec/powerpc_unknown_linux_gnuspe.rs
+++ b/src/librustc_target/spec/powerpc_unknown_linux_gnuspe.rs
@@ -15,9 +15,6 @@ pub fn target() -> TargetResult {
     base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-mspe".to_string());
     base.max_atomic_width = Some(32);
 
-    // see #36994
-    base.exe_allocation_crate = None;
-
     Ok(Target {
         llvm_target: "powerpc-unknown-linux-gnuspe".to_string(),
         target_endian: "big".to_string(),
diff --git a/src/librustc_target/spec/powerpc_unknown_linux_musl.rs b/src/librustc_target/spec/powerpc_unknown_linux_musl.rs
new file mode 100644
index 0000000000..1a4d0cb323
--- /dev/null
+++ b/src/librustc_target/spec/powerpc_unknown_linux_musl.rs
@@ -0,0 +1,31 @@
+// Copyright 2018 The Rust Project Developers.
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use spec::{LinkerFlavor, Target, TargetResult}; + +pub fn target() -> TargetResult { + let mut base = super::linux_musl_base::opts(); + base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m32".to_string()); + base.max_atomic_width = Some(32); + + Ok(Target { + llvm_target: "powerpc-unknown-linux-musl".to_string(), + target_endian: "big".to_string(), + target_pointer_width: "32".to_string(), + target_c_int_width: "32".to_string(), + data_layout: "E-m:e-p:32:32-i64:64-n32".to_string(), + arch: "powerpc".to_string(), + target_os: "linux".to_string(), + target_env: "musl".to_string(), + target_vendor: "unknown".to_string(), + linker_flavor: LinkerFlavor::Gcc, + options: base, + }) +} diff --git a/src/librustc_target/spec/powerpc_unknown_netbsd.rs b/src/librustc_target/spec/powerpc_unknown_netbsd.rs index 740222c960..98625a63f5 100644 --- a/src/librustc_target/spec/powerpc_unknown_netbsd.rs +++ b/src/librustc_target/spec/powerpc_unknown_netbsd.rs @@ -15,9 +15,6 @@ pub fn target() -> TargetResult { base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m32".to_string()); base.max_atomic_width = Some(32); - // see #36994 - base.exe_allocation_crate = None; - Ok(Target { llvm_target: "powerpc-unknown-netbsd".to_string(), target_endian: "big".to_string(), diff --git a/src/librustc_target/spec/s390x_unknown_linux_gnu.rs b/src/librustc_target/spec/s390x_unknown_linux_gnu.rs index c9a9625eba..bd8b7e435d 100644 --- a/src/librustc_target/spec/s390x_unknown_linux_gnu.rs +++ b/src/librustc_target/spec/s390x_unknown_linux_gnu.rs @@ -19,8 +19,6 @@ pub fn target() -> TargetResult { // Pass the -vector feature string to LLVM to respect this assumption. base.features = "-vector".to_string(); base.max_atomic_width = Some(64); - // see #36994 - base.exe_allocation_crate = None; base.min_global_align = Some(16); Ok(Target { diff --git a/src/librustc_target/spec/solaris_base.rs b/src/librustc_target/spec/solaris_base.rs index c14cc3f5bc..93b889d5d3 100644 --- a/src/librustc_target/spec/solaris_base.rs +++ b/src/librustc_target/spec/solaris_base.rs @@ -18,7 +18,6 @@ pub fn opts() -> TargetOptions { has_rpath: true, target_family: Some("unix".to_string()), is_like_solaris: true, - exe_allocation_crate: super::maybe_jemalloc(), .. 
Default::default() } diff --git a/src/librustc_target/spec/sparc64_unknown_linux_gnu.rs b/src/librustc_target/spec/sparc64_unknown_linux_gnu.rs index f68b5fd24b..f2b99aa46d 100644 --- a/src/librustc_target/spec/sparc64_unknown_linux_gnu.rs +++ b/src/librustc_target/spec/sparc64_unknown_linux_gnu.rs @@ -14,7 +14,6 @@ pub fn target() -> TargetResult { let mut base = super::linux_base::opts(); base.cpu = "v9".to_string(); base.max_atomic_width = Some(64); - base.exe_allocation_crate = None; Ok(Target { llvm_target: "sparc64-unknown-linux-gnu".to_string(), diff --git a/src/librustc_target/spec/sparc_unknown_linux_gnu.rs b/src/librustc_target/spec/sparc_unknown_linux_gnu.rs index 4e352374f9..81db39cd23 100644 --- a/src/librustc_target/spec/sparc_unknown_linux_gnu.rs +++ b/src/librustc_target/spec/sparc_unknown_linux_gnu.rs @@ -15,7 +15,6 @@ pub fn target() -> TargetResult { base.cpu = "v9".to_string(); base.max_atomic_width = Some(64); base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-mv8plus".to_string()); - base.exe_allocation_crate = None; Ok(Target { llvm_target: "sparc-unknown-linux-gnu".to_string(), diff --git a/src/librustc_target/spec/sparcv9_sun_solaris.rs b/src/librustc_target/spec/sparcv9_sun_solaris.rs index 8bc233107b..5029e857eb 100644 --- a/src/librustc_target/spec/sparcv9_sun_solaris.rs +++ b/src/librustc_target/spec/sparcv9_sun_solaris.rs @@ -16,7 +16,6 @@ pub fn target() -> TargetResult { // llvm calls this "v9" base.cpu = "v9".to_string(); base.max_atomic_width = Some(64); - base.exe_allocation_crate = None; Ok(Target { llvm_target: "sparcv9-sun-solaris".to_string(), diff --git a/src/librustc_target/spec/thumb_base.rs b/src/librustc_target/spec/thumb_base.rs index 4c9a4764ef..22e5f49fd5 100644 --- a/src/librustc_target/spec/thumb_base.rs +++ b/src/librustc_target/spec/thumb_base.rs @@ -8,7 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -// These 4 `thumbv*` targets cover the ARM Cortex-M family of processors which are widely used in +// These `thumbv*` targets cover the ARM Cortex-M family of processors which are widely used in // microcontrollers. Namely, all these processors: // // - Cortex-M0 @@ -17,8 +17,9 @@ // - Cortex-M3 // - Cortex-M4(F) // - Cortex-M7(F) +// - Cortex-M23 // -// We have opted for 4 targets instead of one target per processor (e.g. `cortex-m0`, `cortex-m3`, +// We have opted for these instead of one target per processor (e.g. `cortex-m0`, `cortex-m3`, // etc) because the differences between some processors like the cortex-m0 and cortex-m1 are almost // non-existent from the POV of codegen so it doesn't make sense to have separate targets for them. // And if differences exist between two processors under the same target, rustc flags can be used to diff --git a/src/librustc_target/spec/thumbv8m_base_none_eabi.rs b/src/librustc_target/spec/thumbv8m_base_none_eabi.rs new file mode 100644 index 0000000000..b614371156 --- /dev/null +++ b/src/librustc_target/spec/thumbv8m_base_none_eabi.rs @@ -0,0 +1,36 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
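The ARMv8-M Baseline spec that follows enables `+strict-align` because that hardware does not support unaligned loads and stores. As a purely illustrative, portable sketch (nothing here is Cortex-M-specific), this is the kind of access where alignment matters and where `ptr::read_unaligned` is the correct pattern:

```
use std::ptr;

fn main() {
    // A 4-byte value stored at an odd offset in a byte buffer, so its
    // address is not 4-byte aligned.
    let buf: [u8; 8] = [0x00, 0xEF, 0xBE, 0xAD, 0xDE, 0x00, 0x00, 0x00];
    let misaligned = unsafe { buf.as_ptr().add(1) } as *const u32;

    // Dereferencing `misaligned` directly would be undefined behaviour and
    // can fault on strict-alignment hardware; `read_unaligned` makes the
    // compiler emit an access that is valid regardless of alignment.
    let value = unsafe { ptr::read_unaligned(misaligned) };
    assert_eq!(u32::from_le(value), 0xDEAD_BEEF);
}
```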
+ +// Targets the Cortex-M23 processor (Baseline ARMv8-M) + +use spec::{LinkerFlavor, LldFlavor, Target, TargetOptions, TargetResult}; + +pub fn target() -> TargetResult { + Ok(Target { + llvm_target: "thumbv8m.base-none-eabi".to_string(), + target_endian: "little".to_string(), + target_pointer_width: "32".to_string(), + target_c_int_width: "32".to_string(), + data_layout: "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".to_string(), + arch: "arm".to_string(), + target_os: "none".to_string(), + target_env: String::new(), + target_vendor: String::new(), + linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld), + + options: TargetOptions { + // ARMv8-M baseline doesn't support unaligned loads/stores so we disable them + // with +strict-align. + features: "+strict-align".to_string(), + max_atomic_width: Some(32), + .. super::thumb_base::opts() + }, + }) +} diff --git a/src/librustc_target/spec/wasm32_unknown_emscripten.rs b/src/librustc_target/spec/wasm32_unknown_emscripten.rs index b4c09f86b8..2c80f3b4b3 100644 --- a/src/librustc_target/spec/wasm32_unknown_emscripten.rs +++ b/src/librustc_target/spec/wasm32_unknown_emscripten.rs @@ -11,12 +11,18 @@ use super::{LinkArgs, LinkerFlavor, Target, TargetOptions}; pub fn target() -> Result { + // FIXME(nikic) BINARYEN_TRAP_MODE=clamp is needed to avoid trapping in our + // -Zsaturating-float-casts implementation. This can be dropped if/when + // we have native fpto[su]i.sat intrinsics, or the implementation otherwise + // stops relying on non-trapping fpto[su]i. let mut post_link_args = LinkArgs::new(); post_link_args.insert(LinkerFlavor::Em, vec!["-s".to_string(), "BINARYEN=1".to_string(), "-s".to_string(), - "ERROR_ON_UNDEFINED_SYMBOLS=1".to_string()]); + "ERROR_ON_UNDEFINED_SYMBOLS=1".to_string(), + "-s".to_string(), + "BINARYEN_TRAP_MODE='clamp'".to_string()]); let opts = TargetOptions { dynamic_linking: false, diff --git a/src/librustc_target/spec/x86_64_fortanix_unknown_sgx.rs b/src/librustc_target/spec/x86_64_fortanix_unknown_sgx.rs new file mode 100644 index 0000000000..07383b3d64 --- /dev/null +++ b/src/librustc_target/spec/x86_64_fortanix_unknown_sgx.rs @@ -0,0 +1,72 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
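The FIXME above explains why `BINARYEN_TRAP_MODE='clamp'` is passed: the `-Zsaturating-float-casts` implementation relies on non-trapping float-to-int conversions. The snippet below only illustrates the saturating semantics that option provides (these assertions hold with the flag enabled, and on later Rust versions where saturation became the default behaviour of `as`); it is plain Rust, nothing Emscripten-specific.

```
fn main() {
    // Out-of-range values clamp to the integer type's bounds instead of
    // trapping or producing an unspecified result; NaN becomes 0.
    assert_eq!(300.0_f32 as u8, 255);                     // clamped to u8::MAX
    assert_eq!(-5.0_f32 as u8, 0);                        // clamped to u8::MIN
    assert_eq!(std::f32::NAN as u8, 0);                   // NaN maps to 0
    assert_eq!(std::f32::INFINITY as i32, std::i32::MAX); // clamped to i32::MAX
}
```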
+
+use std::iter;
+
+use super::{LinkerFlavor, Target, TargetOptions, PanicStrategy};
+
+pub fn target() -> Result<Target, String> {
+    const PRE_LINK_ARGS: &[&str] = &[
+        "-Wl,--as-needed",
+        "-Wl,-z,noexecstack",
+        "-m64",
+        "-fuse-ld=gold",
+        "-nostdlib",
+        "-shared",
+        "-Wl,-e,sgx_entry",
+        "-Wl,-Bstatic",
+        "-Wl,--gc-sections",
+        "-Wl,-z,text",
+        "-Wl,-z,norelro",
+        "-Wl,--rosegment",
+        "-Wl,--no-undefined",
+        "-Wl,--error-unresolved-symbols",
+        "-Wl,--no-undefined-version",
+        "-Wl,-Bsymbolic",
+        "-Wl,--export-dynamic",
+    ];
+    const EXPORT_SYMBOLS: &[&str] = &[
+        "sgx_entry",
+        "HEAP_BASE",
+        "HEAP_SIZE",
+        "RELA",
+        "RELACOUNT",
+        "ENCLAVE_SIZE",
+        "CFGDATA_BASE",
+        "DEBUG",
+    ];
+    let opts = TargetOptions {
+        dynamic_linking: false,
+        executables: true,
+        linker_is_gnu: true,
+        max_atomic_width: Some(64),
+        panic_strategy: PanicStrategy::Abort,
+        cpu: "x86-64".into(),
+        position_independent_executables: true,
+        pre_link_args: iter::once(
+            (LinkerFlavor::Gcc, PRE_LINK_ARGS.iter().cloned().map(String::from).collect())
+        ).collect(),
+        override_export_symbols: Some(EXPORT_SYMBOLS.iter().cloned().map(String::from).collect()),
+        ..Default::default()
+    };
+    Ok(Target {
+        llvm_target: "x86_64-unknown-linux-gnu".into(),
+        target_endian: "little".into(),
+        target_pointer_width: "64".into(),
+        target_c_int_width: "32".into(),
+        target_os: "unknown".into(),
+        target_env: "sgx".into(),
+        target_vendor: "fortanix".into(),
+        data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".into(),
+        arch: "x86_64".into(),
+        linker_flavor: LinkerFlavor::Gcc,
+        options: opts,
+    })
+}
diff --git a/src/librustc_target/spec/x86_64_fuchsia.rs b/src/librustc_target/spec/x86_64_fuchsia.rs
index 62148a740d..08df78d0db 100644
--- a/src/librustc_target/spec/x86_64_fuchsia.rs
+++ b/src/librustc_target/spec/x86_64_fuchsia.rs
@@ -8,13 +8,12 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
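The SGX spec above builds its one-entry `pre_link_args` map with `iter::once(..).collect()`. A self-contained illustration of that collection pattern, using a `BTreeMap` keyed by a plain string instead of `LinkerFlavor` (an assumption made only so the example compiles on its own):

```
use std::collections::BTreeMap;
use std::iter;

fn main() {
    const PRE_LINK_ARGS: &[&str] = &["-Wl,--as-needed", "-Wl,-z,noexecstack"];

    // Build a one-entry map from an iterator of (key, value) pairs, exactly
    // like `pre_link_args: iter::once((LinkerFlavor::Gcc, ...)).collect()`.
    let pre_link_args: BTreeMap<&str, Vec<String>> = iter::once(
        ("gcc", PRE_LINK_ARGS.iter().cloned().map(String::from).collect()),
    )
    .collect();

    assert_eq!(pre_link_args["gcc"].len(), 2);
    assert_eq!(pre_link_args["gcc"][0], "-Wl,--as-needed");
}
```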
-use spec::{LinkerFlavor, Target, TargetResult}; +use spec::{LldFlavor, LinkerFlavor, Target, TargetResult}; pub fn target() -> TargetResult { let mut base = super::fuchsia_base::opts(); base.cpu = "x86-64".to_string(); base.max_atomic_width = Some(64); - base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m64".to_string()); base.stack_probes = true; Ok(Target { @@ -27,7 +26,7 @@ pub fn target() -> TargetResult { target_os: "fuchsia".to_string(), target_env: String::new(), target_vendor: String::new(), - linker_flavor: LinkerFlavor::Gcc, + linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld), options: base, }) } diff --git a/src/librustc_target/spec/x86_64_rumprun_netbsd.rs b/src/librustc_target/spec/x86_64_rumprun_netbsd.rs index 684bf5a6c1..e7570cd2da 100644 --- a/src/librustc_target/spec/x86_64_rumprun_netbsd.rs +++ b/src/librustc_target/spec/x86_64_rumprun_netbsd.rs @@ -21,7 +21,6 @@ pub fn target() -> TargetResult { base.has_rpath = false; base.position_independent_executables = false; base.disable_redzone = true; - base.exe_allocation_crate = None; base.stack_probes = true; Ok(Target { diff --git a/src/librustc_traits/Cargo.toml b/src/librustc_traits/Cargo.toml index 16f0f11757..f057cbb503 100644 --- a/src/librustc_traits/Cargo.toml +++ b/src/librustc_traits/Cargo.toml @@ -14,6 +14,7 @@ graphviz = { path = "../libgraphviz" } log = { version = "0.4" } rustc = { path = "../librustc" } rustc_data_structures = { path = "../librustc_data_structures" } +rustc_target = { path = "../librustc_target" } syntax = { path = "../libsyntax" } syntax_pos = { path = "../libsyntax_pos" } chalk-engine = { version = "0.8.0", default-features=false } diff --git a/src/librustc_traits/chalk_context.rs b/src/librustc_traits/chalk_context.rs deleted file mode 100644 index 5d6badf120..0000000000 --- a/src/librustc_traits/chalk_context.rs +++ /dev/null @@ -1,716 +0,0 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
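Above, the x86_64 Fuchsia target stops routing the link through a C compiler driver (`LinkerFlavor::Gcc`, with `-m64`) and instead names LLD directly via `LinkerFlavor::Lld(LldFlavor::Ld)`. The sketch below is a simplified, self-contained model of flavor-based dispatch; the enums are stand-ins and the binary names are illustrative assumptions, not rustc's actual linker-selection code.

```
// Simplified stand-ins for rustc's LinkerFlavor / LldFlavor.
#[derive(Debug, PartialEq)]
enum LldFlavor {
    Ld,
}

#[derive(Debug, PartialEq)]
enum LinkerFlavor {
    Gcc,
    Lld(LldFlavor),
}

/// Choose which program to run for a given flavor (names are assumptions
/// made for the example, not necessarily what rustc invokes).
fn linker_binary(flavor: &LinkerFlavor) -> &'static str {
    match flavor {
        // A gcc-flavored link goes through the C compiler driver, which is
        // why gcc-style flags such as `-m64` made sense before this change.
        LinkerFlavor::Gcc => "cc",
        // An LLD flavor can invoke the bundled LLD linker directly, so those
        // driver flags are no longer needed.
        LinkerFlavor::Lld(LldFlavor::Ld) => "rust-lld",
    }
}

fn main() {
    assert_eq!(linker_binary(&LinkerFlavor::Gcc), "cc");
    assert_eq!(linker_binary(&LinkerFlavor::Lld(LldFlavor::Ld)), "rust-lld");
}
```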
- -use chalk_engine::fallible::Fallible as ChalkEngineFallible; -use chalk_engine::{context, hh::HhGoal, DelayedLiteral, ExClause}; -use rustc::infer::canonical::{ - Canonical, CanonicalVarValues, OriginalQueryValues, QueryRegionConstraint, QueryResponse, -}; -use rustc::infer::{InferCtxt, InferOk, LateBoundRegionConversionTime}; -use rustc::traits::{ - WellFormed, - FromEnv, - DomainGoal, - ExClauseFold, - ExClauseLift, - Goal, - GoalKind, - Clause, - ProgramClauseCategory, - QuantifierKind, - Environment, - InEnvironment, -}; -use rustc::ty::fold::{TypeFoldable, TypeFolder, TypeVisitor}; -use rustc::ty::subst::Kind; -use rustc::ty::{self, TyCtxt}; -use rustc::hir::def_id::DefId; - -use std::fmt::{self, Debug}; -use std::marker::PhantomData; - -use syntax_pos::DUMMY_SP; - -#[derive(Copy, Clone, Debug)] -crate struct ChalkArenas<'gcx> { - _phantom: PhantomData<&'gcx ()>, -} - -#[derive(Copy, Clone)] -crate struct ChalkContext<'cx, 'gcx: 'cx> { - _arenas: ChalkArenas<'gcx>, - _tcx: TyCtxt<'cx, 'gcx, 'gcx>, -} - -#[derive(Copy, Clone)] -crate struct ChalkInferenceContext<'cx, 'gcx: 'tcx, 'tcx: 'cx> { - infcx: &'cx InferCtxt<'cx, 'gcx, 'tcx>, -} - -#[derive(Copy, Clone, Debug)] -crate struct UniverseMap; - -#[derive(Clone, Debug, PartialEq, Eq, Hash)] -crate struct ConstrainedSubst<'tcx> { - subst: CanonicalVarValues<'tcx>, - constraints: Vec>, -} - -BraceStructTypeFoldableImpl! { - impl<'tcx> TypeFoldable<'tcx> for ConstrainedSubst<'tcx> { - subst, constraints - } -} - -impl context::Context for ChalkArenas<'tcx> { - type CanonicalExClause = Canonical<'tcx, ExClause>; - - type CanonicalGoalInEnvironment = Canonical<'tcx, InEnvironment<'tcx, Goal<'tcx>>>; - - // u-canonicalization not yet implemented - type UCanonicalGoalInEnvironment = Canonical<'tcx, InEnvironment<'tcx, Goal<'tcx>>>; - - type CanonicalConstrainedSubst = Canonical<'tcx, ConstrainedSubst<'tcx>>; - - // u-canonicalization not yet implemented - type UniverseMap = UniverseMap; - - type Solution = Canonical<'tcx, QueryResponse<'tcx, ()>>; - - type InferenceNormalizedSubst = CanonicalVarValues<'tcx>; - - type GoalInEnvironment = InEnvironment<'tcx, Goal<'tcx>>; - - type RegionConstraint = QueryRegionConstraint<'tcx>; - - type Substitution = CanonicalVarValues<'tcx>; - - type Environment = Environment<'tcx>; - - type Goal = Goal<'tcx>; - - type DomainGoal = DomainGoal<'tcx>; - - type BindersGoal = ty::Binder>; - - type Parameter = Kind<'tcx>; - - type ProgramClause = Clause<'tcx>; - - type ProgramClauses = Vec>; - - type UnificationResult = InferOk<'tcx, ()>; - - fn goal_in_environment( - env: &Environment<'tcx>, - goal: Goal<'tcx>, - ) -> InEnvironment<'tcx, Goal<'tcx>> { - env.with(goal) - } -} - -impl context::AggregateOps> for ChalkContext<'cx, 'gcx> { - fn make_solution( - &self, - _root_goal: &Canonical<'gcx, InEnvironment<'gcx, Goal<'gcx>>>, - _simplified_answers: impl context::AnswerStream>, - ) -> Option>> { - unimplemented!() - } -} - -impl context::ContextOps> for ChalkContext<'cx, 'gcx> { - /// True if this is a coinductive goal -- e.g., proving an auto trait. - fn is_coinductive( - &self, - _goal: &Canonical<'gcx, InEnvironment<'gcx, Goal<'gcx>>> - ) -> bool { - unimplemented!() - } - - /// Create an inference table for processing a new goal and instantiate that goal - /// in that context, returning "all the pieces". - /// - /// More specifically: given a u-canonical goal `arg`, creates a - /// new inference table `T` and populates it with the universes - /// found in `arg`. 
Then, creates a substitution `S` that maps - /// each bound variable in `arg` to a fresh inference variable - /// from T. Returns: - /// - /// - the table `T` - /// - the substitution `S` - /// - the environment and goal found by substitution `S` into `arg` - fn instantiate_ucanonical_goal( - &self, - _arg: &Canonical<'gcx, InEnvironment<'gcx, Goal<'gcx>>>, - _op: impl context::WithInstantiatedUCanonicalGoal, Output = R>, - ) -> R { - unimplemented!() - } - - fn instantiate_ex_clause( - &self, - _num_universes: usize, - _canonical_ex_clause: &Canonical<'gcx, ChalkExClause<'gcx>>, - _op: impl context::WithInstantiatedExClause, Output = R>, - ) -> R { - unimplemented!() - } - - /// True if this solution has no region constraints. - fn empty_constraints(ccs: &Canonical<'gcx, ConstrainedSubst<'gcx>>) -> bool { - ccs.value.constraints.is_empty() - } - - fn inference_normalized_subst_from_ex_clause( - canon_ex_clause: &'a Canonical<'gcx, ChalkExClause<'gcx>>, - ) -> &'a CanonicalVarValues<'gcx> { - &canon_ex_clause.value.subst - } - - fn inference_normalized_subst_from_subst( - canon_subst: &'a Canonical<'gcx, ConstrainedSubst<'gcx>>, - ) -> &'a CanonicalVarValues<'gcx> { - &canon_subst.value.subst - } - - fn canonical( - u_canon: &'a Canonical<'gcx, InEnvironment<'gcx, Goal<'gcx>>>, - ) -> &'a Canonical<'gcx, InEnvironment<'gcx, Goal<'gcx>>> { - u_canon - } - - fn is_trivial_substitution( - _u_canon: &Canonical<'gcx, InEnvironment<'gcx, Goal<'gcx>>>, - _canonical_subst: &Canonical<'gcx, ConstrainedSubst<'gcx>>, - ) -> bool { - unimplemented!() - } - - fn num_universes(_: &Canonical<'gcx, InEnvironment<'gcx, Goal<'gcx>>>) -> usize { - 0 // FIXME - } - - /// Convert a goal G *from* the canonical universes *into* our - /// local universes. This will yield a goal G' that is the same - /// but for the universes of universally quantified names. 
- fn map_goal_from_canonical( - _map: &UniverseMap, - value: &Canonical<'gcx, InEnvironment<'gcx, Goal<'gcx>>>, - ) -> Canonical<'gcx, InEnvironment<'gcx, Goal<'gcx>>> { - *value // FIXME universe maps not implemented yet - } - - fn map_subst_from_canonical( - _map: &UniverseMap, - value: &Canonical<'gcx, ConstrainedSubst<'gcx>>, - ) -> Canonical<'gcx, ConstrainedSubst<'gcx>> { - value.clone() // FIXME universe maps not implemented yet - } -} - -//impl context::UCanonicalGoalInEnvironment> -// for Canonical<'gcx, ty::ParamEnvAnd<'gcx, Goal<'gcx>>> -//{ -// fn canonical(&self) -> &Canonical<'gcx, ty::ParamEnvAnd<'gcx, Goal<'gcx>>> { -// self -// } -// -// fn is_trivial_substitution( -// &self, -// canonical_subst: &Canonical<'tcx, ConstrainedSubst<'tcx>>, -// ) -> bool { -// let subst = &canonical_subst.value.subst; -// assert_eq!(self.canonical.variables.len(), subst.var_values.len()); -// subst -// .var_values -// .iter_enumerated() -// .all(|(cvar, kind)| match kind.unpack() { -// Kind::Lifetime(r) => match r { -// ty::ReCanonical(cvar1) => cvar == cvar1, -// _ => false, -// }, -// Kind::Type(ty) => match ty.sty { -// ty::Infer(ty::InferTy::CanonicalTy(cvar1)) => cvar == cvar1, -// _ => false, -// }, -// }) -// } -// -// fn num_universes(&self) -> usize { -// 0 // FIXME -// } -//} - -impl context::InferenceTable, ChalkArenas<'tcx>> - for ChalkInferenceContext<'cx, 'gcx, 'tcx> -{ - fn into_goal(&self, domain_goal: DomainGoal<'tcx>) -> Goal<'tcx> { - self.infcx.tcx.mk_goal(GoalKind::DomainGoal(domain_goal)) - } - - fn cannot_prove(&self) -> Goal<'tcx> { - self.infcx.tcx.mk_goal(GoalKind::CannotProve) - } - - fn into_hh_goal(&mut self, goal: Goal<'tcx>) -> ChalkHhGoal<'tcx> { - match *goal { - GoalKind::Implies(..) => panic!("FIXME rust-lang-nursery/chalk#94"), - GoalKind::And(left, right) => HhGoal::And(left, right), - GoalKind::Not(subgoal) => HhGoal::Not(subgoal), - GoalKind::DomainGoal(d) => HhGoal::DomainGoal(d), - GoalKind::Quantified(QuantifierKind::Universal, binder) => HhGoal::ForAll(binder), - GoalKind::Quantified(QuantifierKind::Existential, binder) => HhGoal::Exists(binder), - GoalKind::CannotProve => HhGoal::CannotProve, - } - } - - fn add_clauses( - &mut self, - env: &Environment<'tcx>, - clauses: Vec>, - ) -> Environment<'tcx> { - Environment { - clauses: self.infcx.tcx.mk_clauses( - env.clauses.iter().cloned().chain(clauses.into_iter()) - ) - } - } -} - -impl context::ResolventOps, ChalkArenas<'tcx>> - for ChalkInferenceContext<'cx, 'gcx, 'tcx> -{ - fn resolvent_clause( - &mut self, - _environment: &Environment<'tcx>, - _goal: &DomainGoal<'tcx>, - _subst: &CanonicalVarValues<'tcx>, - _clause: &Clause<'tcx>, - ) -> chalk_engine::fallible::Fallible>> { - panic!() - } - - fn apply_answer_subst( - &mut self, - _ex_clause: ChalkExClause<'tcx>, - _selected_goal: &InEnvironment<'tcx, Goal<'tcx>>, - _answer_table_goal: &Canonical<'gcx, InEnvironment<'gcx, Goal<'gcx>>>, - _canonical_answer_subst: &Canonical<'gcx, ConstrainedSubst<'gcx>>, - ) -> chalk_engine::fallible::Fallible> { - panic!() - } -} - -impl context::TruncateOps, ChalkArenas<'tcx>> - for ChalkInferenceContext<'cx, 'gcx, 'tcx> -{ - fn truncate_goal( - &mut self, - subgoal: &InEnvironment<'tcx, Goal<'tcx>>, - ) -> Option>> { - Some(*subgoal) // FIXME we should truncate at some point! - } - - fn truncate_answer( - &mut self, - subst: &CanonicalVarValues<'tcx>, - ) -> Option> { - Some(subst.clone()) // FIXME we should truncate at some point! 
- } -} - -impl context::UnificationOps, ChalkArenas<'tcx>> - for ChalkInferenceContext<'cx, 'gcx, 'tcx> -{ - fn program_clauses( - &self, - environment: &Environment<'tcx>, - goal: &DomainGoal<'tcx>, - ) -> Vec> { - use rustc::traits::WhereClause::*; - - fn assemble_clauses_from_impls<'tcx>( - tcx: ty::TyCtxt<'_, '_, 'tcx>, - trait_def_id: DefId, - clauses: &mut Vec> - ) { - tcx.for_each_impl(trait_def_id, |impl_def_id| { - clauses.extend( - tcx.program_clauses_for(impl_def_id) - .into_iter() - .cloned() - ); - }); - } - - fn assemble_clauses_from_assoc_ty_values<'tcx>( - tcx: ty::TyCtxt<'_, '_, 'tcx>, - trait_def_id: DefId, - clauses: &mut Vec> - ) { - tcx.for_each_impl(trait_def_id, |impl_def_id| { - for def_id in tcx.associated_item_def_ids(impl_def_id).iter() { - clauses.extend( - tcx.program_clauses_for(*def_id) - .into_iter() - .cloned() - ); - } - }); - } - - let mut clauses = match goal { - DomainGoal::Holds(Implemented(trait_predicate)) => { - // These come from: - // * implementations of the trait itself (rule `Implemented-From-Impl`) - // * the trait decl (rule `Implemented-From-Env`) - - let mut clauses = vec![]; - assemble_clauses_from_impls( - self.infcx.tcx, - trait_predicate.def_id(), - &mut clauses - ); - - // FIXME: we need to add special rules for builtin impls: - // * `Copy` / `Clone` - // * `Sized` - // * `Unsize` - // * `Generator` - // * `FnOnce` / `FnMut` / `Fn` - // * trait objects - // * auto traits - - // Rule `Implemented-From-Env` will be computed from the environment. - clauses - } - - DomainGoal::Holds(ProjectionEq(projection_predicate)) => { - // These come from: - // * the assoc type definition (rule `ProjectionEq-Placeholder`) - // * normalization of the assoc ty values (rule `ProjectionEq-Normalize`) - // * implied bounds from trait definitions (rule `Implied-Bound-From-Trait`) - // * implied bounds from type definitions (rule `Implied-Bound-From-Type`) - - let clauses = self.infcx.tcx.program_clauses_for( - projection_predicate.projection_ty.item_def_id - ).into_iter() - - // only select `ProjectionEq-Placeholder` and `ProjectionEq-Normalize` - .filter(|clause| clause.category() == ProgramClauseCategory::Other) - - .cloned() - .collect::>(); - - // Rules `Implied-Bound-From-Trait` and `Implied-Bound-From-Type` will be computed - // from the environment. - clauses - } - - DomainGoal::Holds(RegionOutlives(..)) => { - // These come from: - // * implied bounds from trait definitions (rule `Implied-Bound-From-Trait`) - // * implied bounds from type definitions (rule `Implied-Bound-From-Type`) - - // All of these rules are computed in the environment. - vec![] - } - - DomainGoal::Holds(TypeOutlives(..)) => { - // These come from: - // * implied bounds from trait definitions (rule `Implied-Bound-From-Trait`) - // * implied bounds from type definitions (rule `Implied-Bound-From-Type`) - - // All of these rules are computed in the environment. - vec![] - } - - DomainGoal::WellFormed(WellFormed::Trait(trait_predicate)) => { - // These come from -- the trait decl (rule `WellFormed-TraitRef`). 
- self.infcx.tcx.program_clauses_for(trait_predicate.def_id()) - .into_iter() - - // only select `WellFormed-TraitRef` - .filter(|clause| clause.category() == ProgramClauseCategory::WellFormed) - - .cloned() - .collect() - } - - DomainGoal::WellFormed(WellFormed::Ty(ty)) => { - // These come from: - // * the associated type definition if `ty` refers to an unnormalized - // associated type (rule `WellFormed-AssocTy`) - // * custom rules for built-in types - // * the type definition otherwise (rule `WellFormed-Type`) - let clauses = match ty.sty { - ty::Projection(data) => { - self.infcx.tcx.program_clauses_for(data.item_def_id) - } - - // These types are always WF (recall that we do not check - // for parameters to be WF) - ty::Bool | - ty::Char | - ty::Int(..) | - ty::Uint(..) | - ty::Float(..) | - ty::Str | - ty::RawPtr(..) | - ty::FnPtr(..) | - ty::Param(..) | - ty::Never => { - ty::List::empty() - } - - // WF if inner type is `Sized` - ty::Slice(..) | - ty::Array(..) => { - ty::List::empty() - } - - ty::Tuple(..) => { - ty::List::empty() - } - - // WF if `sub_ty` outlives `region` - ty::Ref(..) => { - ty::List::empty() - } - - ty::Dynamic(..) => { - // FIXME: no rules yet for trait objects - ty::List::empty() - } - - ty::Adt(def, ..) => { - self.infcx.tcx.program_clauses_for(def.did) - } - - ty::Foreign(def_id) | - ty::FnDef(def_id, ..) | - ty::Closure(def_id, ..) | - ty::Generator(def_id, ..) | - ty::Opaque(def_id, ..) => { - self.infcx.tcx.program_clauses_for(def_id) - } - - ty::GeneratorWitness(..) | - ty::UnnormalizedProjection(..) | - ty::Infer(..) | - ty::Error => { - bug!("unexpected type {:?}", ty) - } - }; - - clauses.into_iter() - .filter(|clause| clause.category() == ProgramClauseCategory::WellFormed) - .cloned() - .collect() - } - - DomainGoal::FromEnv(FromEnv::Trait(..)) => { - // These come from: - // * implied bounds from trait definitions (rule `Implied-Bound-From-Trait`) - // * implied bounds from type definitions (rule `Implied-Bound-From-Type`) - // * implied bounds from assoc type defs (rules `Implied-Trait-From-AssocTy`, - // `Implied-Bound-From-AssocTy` and `Implied-WC-From-AssocTy`) - - // All of these rules are computed in the environment. - vec![] - } - - DomainGoal::FromEnv(FromEnv::Ty(..)) => { - // There are no `FromEnv::Ty(..) :- ...` rules (this predicate only - // comes from the environment). - vec![] - } - - DomainGoal::Normalize(projection_predicate) => { - // These come from -- assoc ty values (rule `Normalize-From-Impl`). 
- let mut clauses = vec![]; - - assemble_clauses_from_assoc_ty_values( - self.infcx.tcx, - projection_predicate.projection_ty.trait_ref(self.infcx.tcx).def_id, - &mut clauses - ); - - clauses - } - }; - - let environment = self.infcx.tcx.lift_to_global(environment) - .expect("environment is not global"); - clauses.extend( - self.infcx.tcx.program_clauses_for_env(environment) - .into_iter() - .cloned() - ); - clauses - } - - fn instantiate_binders_universally( - &mut self, - _arg: &ty::Binder>, - ) -> Goal<'tcx> { - panic!("FIXME -- universal instantiation needs sgrif's branch") - } - - fn instantiate_binders_existentially( - &mut self, - arg: &ty::Binder>, - ) -> Goal<'tcx> { - let (value, _map) = self.infcx.replace_late_bound_regions_with_fresh_var( - DUMMY_SP, - LateBoundRegionConversionTime::HigherRankedType, - arg, - ); - value - } - - fn debug_ex_clause(&mut self, value: &'v ChalkExClause<'tcx>) -> Box { - let string = format!("{:?}", self.infcx.resolve_type_vars_if_possible(value)); - Box::new(string) - } - - fn canonicalize_goal( - &mut self, - value: &InEnvironment<'tcx, Goal<'tcx>>, - ) -> Canonical<'gcx, InEnvironment<'gcx, Goal<'gcx>>> { - let mut _orig_values = OriginalQueryValues::default(); - self.infcx.canonicalize_query(value, &mut _orig_values) - } - - fn canonicalize_ex_clause( - &mut self, - value: &ChalkExClause<'tcx>, - ) -> Canonical<'gcx, ChalkExClause<'gcx>> { - self.infcx.canonicalize_response(value) - } - - fn canonicalize_constrained_subst( - &mut self, - subst: CanonicalVarValues<'tcx>, - constraints: Vec>, - ) -> Canonical<'gcx, ConstrainedSubst<'gcx>> { - self.infcx.canonicalize_response(&ConstrainedSubst { subst, constraints }) - } - - fn u_canonicalize_goal( - &mut self, - value: &Canonical<'gcx, InEnvironment<'gcx, Goal<'gcx>>>, - ) -> ( - Canonical<'gcx, InEnvironment<'gcx, Goal<'gcx>>>, - UniverseMap, - ) { - (value.clone(), UniverseMap) - } - - fn invert_goal( - &mut self, - _value: &InEnvironment<'tcx, Goal<'tcx>>, - ) -> Option>> { - panic!("goal inversion not yet implemented") - } - - fn unify_parameters( - &mut self, - _environment: &Environment<'tcx>, - _a: &Kind<'tcx>, - _b: &Kind<'tcx>, - ) -> ChalkEngineFallible> { - panic!() - } - - fn sink_answer_subset( - &self, - value: &Canonical<'gcx, ConstrainedSubst<'gcx>>, - ) -> Canonical<'tcx, ConstrainedSubst<'tcx>> { - value.clone() - } - - fn lift_delayed_literal( - &self, - _value: DelayedLiteral>, - ) -> DelayedLiteral> { - panic!("lift") - } - - fn into_ex_clause(&mut self, _result: InferOk<'tcx, ()>, _ex_clause: &mut ChalkExClause<'tcx>) { - panic!("TBD") - } -} - -type ChalkHhGoal<'tcx> = HhGoal>; - -type ChalkExClause<'tcx> = ExClause>; - -impl Debug for ChalkContext<'cx, 'gcx> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "ChalkContext") - } -} - -impl Debug for ChalkInferenceContext<'cx, 'gcx, 'tcx> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "ChalkInferenceContext") - } -} - -impl ExClauseLift<'gcx> for ChalkArenas<'a> { - type LiftedExClause = ChalkExClause<'gcx>; - - fn lift_ex_clause_to_tcx( - _ex_clause: &ChalkExClause<'a>, - _tcx: TyCtxt<'_, '_, 'tcx>, - ) -> Option { - panic!() - } -} - -impl ExClauseFold<'tcx> for ChalkArenas<'tcx> { - fn fold_ex_clause_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>( - ex_clause: &ChalkExClause<'tcx>, - folder: &mut F, - ) -> ChalkExClause<'tcx> { - ExClause { - subst: ex_clause.subst.fold_with(folder), - delayed_literals: ex_clause.delayed_literals.fold_with(folder), - constraints: 
ex_clause.constraints.fold_with(folder), - subgoals: ex_clause.subgoals.fold_with(folder), - } - } - - fn visit_ex_clause_with<'gcx: 'tcx, V: TypeVisitor<'tcx>>( - ex_clause: &ExClause, - visitor: &mut V, - ) -> bool { - let ExClause { - subst, - delayed_literals, - constraints, - subgoals, - } = ex_clause; - subst.visit_with(visitor) - && delayed_literals.visit_with(visitor) - && constraints.visit_with(visitor) - && subgoals.visit_with(visitor) - } -} - -BraceStructLiftImpl! { - impl<'a, 'tcx> Lift<'tcx> for ConstrainedSubst<'a> { - type Lifted = ConstrainedSubst<'tcx>; - - subst, constraints - } -} diff --git a/src/librustc_traits/chalk_context/mod.rs b/src/librustc_traits/chalk_context/mod.rs new file mode 100644 index 0000000000..58a8d2abd9 --- /dev/null +++ b/src/librustc_traits/chalk_context/mod.rs @@ -0,0 +1,586 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +mod program_clauses; +mod resolvent_ops; +mod unify; + +use chalk_engine::fallible::{Fallible, NoSolution}; +use chalk_engine::{ + context, + hh::HhGoal, + DelayedLiteral, + Literal, + ExClause +}; +use rustc::infer::{InferCtxt, LateBoundRegionConversionTime}; +use rustc::infer::canonical::{ + Canonical, + CanonicalVarValues, + OriginalQueryValues, + QueryResponse, + Certainty, +}; +use rustc::traits::{ + DomainGoal, + ExClauseFold, + ExClauseLift, + Goal, + GoalKind, + Clause, + QuantifierKind, + Environment, + InEnvironment, +}; +use rustc::ty::{self, TyCtxt}; +use rustc::ty::fold::{TypeFoldable, TypeFolder, TypeVisitor}; +use rustc::ty::subst::{Kind, UnpackedKind}; +use syntax_pos::DUMMY_SP; + +use std::fmt::{self, Debug}; +use std::marker::PhantomData; + +use self::unify::*; + +#[derive(Copy, Clone, Debug)] +crate struct ChalkArenas<'gcx> { + _phantom: PhantomData<&'gcx ()>, +} + +#[derive(Copy, Clone)] +crate struct ChalkContext<'cx, 'gcx: 'cx> { + _arenas: ChalkArenas<'gcx>, + tcx: TyCtxt<'cx, 'gcx, 'gcx>, +} + +#[derive(Copy, Clone)] +crate struct ChalkInferenceContext<'cx, 'gcx: 'tcx, 'tcx: 'cx> { + infcx: &'cx InferCtxt<'cx, 'gcx, 'tcx>, +} + +#[derive(Copy, Clone, Debug)] +crate struct UniverseMap; + +crate type RegionConstraint<'tcx> = ty::OutlivesPredicate, ty::Region<'tcx>>; + +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +crate struct ConstrainedSubst<'tcx> { + subst: CanonicalVarValues<'tcx>, + constraints: Vec>, +} + +BraceStructTypeFoldableImpl! 
{ + impl<'tcx> TypeFoldable<'tcx> for ConstrainedSubst<'tcx> { + subst, constraints + } +} + +impl context::Context for ChalkArenas<'tcx> { + type CanonicalExClause = Canonical<'tcx, ChalkExClause<'tcx>>; + + type CanonicalGoalInEnvironment = Canonical<'tcx, InEnvironment<'tcx, Goal<'tcx>>>; + + // u-canonicalization not yet implemented + type UCanonicalGoalInEnvironment = Canonical<'tcx, InEnvironment<'tcx, Goal<'tcx>>>; + + type CanonicalConstrainedSubst = Canonical<'tcx, ConstrainedSubst<'tcx>>; + + // u-canonicalization not yet implemented + type UniverseMap = UniverseMap; + + type Solution = Canonical<'tcx, QueryResponse<'tcx, ()>>; + + type InferenceNormalizedSubst = CanonicalVarValues<'tcx>; + + type GoalInEnvironment = InEnvironment<'tcx, Goal<'tcx>>; + + type RegionConstraint = RegionConstraint<'tcx>; + + type Substitution = CanonicalVarValues<'tcx>; + + type Environment = Environment<'tcx>; + + type Goal = Goal<'tcx>; + + type DomainGoal = DomainGoal<'tcx>; + + type BindersGoal = ty::Binder>; + + type Parameter = Kind<'tcx>; + + type ProgramClause = Clause<'tcx>; + + type ProgramClauses = Vec>; + + type UnificationResult = UnificationResult<'tcx>; + + fn goal_in_environment( + env: &Environment<'tcx>, + goal: Goal<'tcx>, + ) -> InEnvironment<'tcx, Goal<'tcx>> { + env.with(goal) + } +} + +impl context::AggregateOps> for ChalkContext<'cx, 'gcx> { + fn make_solution( + &self, + _root_goal: &Canonical<'gcx, InEnvironment<'gcx, Goal<'gcx>>>, + mut simplified_answers: impl context::AnswerStream>, + ) -> Option>> { + use chalk_engine::SimplifiedAnswer; + + if simplified_answers.peek_answer().is_none() { + return None; + } + + let SimplifiedAnswer { subst, ambiguous } = simplified_answers + .next_answer() + .unwrap(); + + let ambiguous = simplified_answers.peek_answer().is_some() || ambiguous; + + Some(subst.unchecked_map(|subst| { + QueryResponse { + var_values: subst.subst, + region_constraints: subst.constraints + .into_iter() + .map(|c| ty::Binder::bind(c)) + .collect(), + certainty: match ambiguous { + true => Certainty::Ambiguous, + false => Certainty::Proven, + }, + value: (), + } + })) + } +} + +impl context::ContextOps> for ChalkContext<'cx, 'gcx> { + /// True if this is a coinductive goal -- e.g., proving an auto trait. + fn is_coinductive( + &self, + _goal: &Canonical<'gcx, InEnvironment<'gcx, Goal<'gcx>>> + ) -> bool { + unimplemented!() + } + + /// Create an inference table for processing a new goal and instantiate that goal + /// in that context, returning "all the pieces". + /// + /// More specifically: given a u-canonical goal `arg`, creates a + /// new inference table `T` and populates it with the universes + /// found in `arg`. Then, creates a substitution `S` that maps + /// each bound variable in `arg` to a fresh inference variable + /// from T. 
Returns: + /// + /// - the table `T` + /// - the substitution `S` + /// - the environment and goal found by substitution `S` into `arg` + fn instantiate_ucanonical_goal( + &self, + arg: &Canonical<'gcx, InEnvironment<'gcx, Goal<'gcx>>>, + op: impl context::WithInstantiatedUCanonicalGoal, Output = R>, + ) -> R { + self.tcx.infer_ctxt().enter_with_canonical(DUMMY_SP, arg, |ref infcx, arg, subst| { + let chalk_infcx = &mut ChalkInferenceContext { + infcx, + }; + op.with(chalk_infcx, subst, arg.environment, arg.goal) + }) + } + + fn instantiate_ex_clause( + &self, + _num_universes: usize, + arg: &Canonical<'gcx, ChalkExClause<'gcx>>, + op: impl context::WithInstantiatedExClause, Output = R>, + ) -> R { + self.tcx.infer_ctxt().enter_with_canonical(DUMMY_SP, &arg.upcast(), |ref infcx, arg, _| { + let chalk_infcx = &mut ChalkInferenceContext { + infcx, + }; + op.with(chalk_infcx,arg) + }) + } + + /// True if this solution has no region constraints. + fn empty_constraints(ccs: &Canonical<'gcx, ConstrainedSubst<'gcx>>) -> bool { + ccs.value.constraints.is_empty() + } + + fn inference_normalized_subst_from_ex_clause( + canon_ex_clause: &'a Canonical<'gcx, ChalkExClause<'gcx>>, + ) -> &'a CanonicalVarValues<'gcx> { + &canon_ex_clause.value.subst + } + + fn inference_normalized_subst_from_subst( + canon_subst: &'a Canonical<'gcx, ConstrainedSubst<'gcx>>, + ) -> &'a CanonicalVarValues<'gcx> { + &canon_subst.value.subst + } + + fn canonical( + u_canon: &'a Canonical<'gcx, InEnvironment<'gcx, Goal<'gcx>>>, + ) -> &'a Canonical<'gcx, InEnvironment<'gcx, Goal<'gcx>>> { + u_canon + } + + fn is_trivial_substitution( + u_canon: &Canonical<'gcx, InEnvironment<'gcx, Goal<'gcx>>>, + canonical_subst: &Canonical<'gcx, ConstrainedSubst<'gcx>>, + ) -> bool { + let subst = &canonical_subst.value.subst; + assert_eq!(u_canon.variables.len(), subst.var_values.len()); + subst.var_values + .iter_enumerated() + .all(|(cvar, kind)| match kind.unpack() { + UnpackedKind::Lifetime(r) => match r { + &ty::ReLateBound(debruijn, br) => { + debug_assert_eq!(debruijn, ty::INNERMOST); + cvar == br.assert_bound_var() + } + _ => false, + }, + UnpackedKind::Type(ty) => match ty.sty { + ty::Bound(debruijn, bound_ty) => { + debug_assert_eq!(debruijn, ty::INNERMOST); + cvar == bound_ty.var + } + _ => false, + }, + }) + } + + fn num_universes(canon: &Canonical<'gcx, InEnvironment<'gcx, Goal<'gcx>>>) -> usize { + canon.max_universe.index() + 1 + } + + /// Convert a goal G *from* the canonical universes *into* our + /// local universes. This will yield a goal G' that is the same + /// but for the universes of universally quantified names. 
+ fn map_goal_from_canonical( + _map: &UniverseMap, + value: &Canonical<'gcx, InEnvironment<'gcx, Goal<'gcx>>>, + ) -> Canonical<'gcx, InEnvironment<'gcx, Goal<'gcx>>> { + *value // FIXME universe maps not implemented yet + } + + fn map_subst_from_canonical( + _map: &UniverseMap, + value: &Canonical<'gcx, ConstrainedSubst<'gcx>>, + ) -> Canonical<'gcx, ConstrainedSubst<'gcx>> { + value.clone() // FIXME universe maps not implemented yet + } +} + +impl context::InferenceTable, ChalkArenas<'tcx>> + for ChalkInferenceContext<'cx, 'gcx, 'tcx> +{ + fn into_goal(&self, domain_goal: DomainGoal<'tcx>) -> Goal<'tcx> { + self.infcx.tcx.mk_goal(GoalKind::DomainGoal(domain_goal)) + } + + fn cannot_prove(&self) -> Goal<'tcx> { + self.infcx.tcx.mk_goal(GoalKind::CannotProve) + } + + fn into_hh_goal(&mut self, goal: Goal<'tcx>) -> ChalkHhGoal<'tcx> { + match *goal { + GoalKind::Implies(hypotheses, goal) => HhGoal::Implies( + hypotheses.iter().cloned().collect(), + goal + ), + GoalKind::And(left, right) => HhGoal::And(left, right), + GoalKind::Not(subgoal) => HhGoal::Not(subgoal), + GoalKind::DomainGoal(d) => HhGoal::DomainGoal(d), + GoalKind::Quantified(QuantifierKind::Universal, binder) => HhGoal::ForAll(binder), + GoalKind::Quantified(QuantifierKind::Existential, binder) => HhGoal::Exists(binder), + GoalKind::CannotProve => HhGoal::CannotProve, + } + } + + fn add_clauses( + &mut self, + env: &Environment<'tcx>, + clauses: Vec>, + ) -> Environment<'tcx> { + Environment { + clauses: self.infcx.tcx.mk_clauses( + env.clauses.iter().cloned().chain(clauses.into_iter()) + ) + } + } +} + +impl context::TruncateOps, ChalkArenas<'tcx>> + for ChalkInferenceContext<'cx, 'gcx, 'tcx> +{ + fn truncate_goal( + &mut self, + subgoal: &InEnvironment<'tcx, Goal<'tcx>>, + ) -> Option>> { + Some(*subgoal) // FIXME we should truncate at some point! + } + + fn truncate_answer( + &mut self, + subst: &CanonicalVarValues<'tcx>, + ) -> Option> { + Some(subst.clone()) // FIXME we should truncate at some point! 
+ } +} + +impl context::UnificationOps, ChalkArenas<'tcx>> + for ChalkInferenceContext<'cx, 'gcx, 'tcx> +{ + fn program_clauses( + &self, + environment: &Environment<'tcx>, + goal: &DomainGoal<'tcx>, + ) -> Vec> { + self.program_clauses_impl(environment, goal) + } + + fn instantiate_binders_universally( + &mut self, + arg: &ty::Binder>, + ) -> Goal<'tcx> { + self.infcx.replace_bound_vars_with_placeholders(arg).0 + } + + fn instantiate_binders_existentially( + &mut self, + arg: &ty::Binder>, + ) -> Goal<'tcx> { + self.infcx.replace_bound_vars_with_fresh_vars( + DUMMY_SP, + LateBoundRegionConversionTime::HigherRankedType, + arg + ).0 + } + + fn debug_ex_clause(&mut self, value: &'v ChalkExClause<'tcx>) -> Box { + let string = format!("{:?}", self.infcx.resolve_type_vars_if_possible(value)); + Box::new(string) + } + + fn canonicalize_goal( + &mut self, + value: &InEnvironment<'tcx, Goal<'tcx>>, + ) -> Canonical<'gcx, InEnvironment<'gcx, Goal<'gcx>>> { + let mut _orig_values = OriginalQueryValues::default(); + self.infcx.canonicalize_query(value, &mut _orig_values) + } + + fn canonicalize_ex_clause( + &mut self, + value: &ChalkExClause<'tcx>, + ) -> Canonical<'gcx, ChalkExClause<'gcx>> { + self.infcx.canonicalize_response(value) + } + + fn canonicalize_constrained_subst( + &mut self, + subst: CanonicalVarValues<'tcx>, + constraints: Vec>, + ) -> Canonical<'gcx, ConstrainedSubst<'gcx>> { + self.infcx.canonicalize_response(&ConstrainedSubst { subst, constraints }) + } + + fn u_canonicalize_goal( + &mut self, + value: &Canonical<'gcx, InEnvironment<'gcx, Goal<'gcx>>>, + ) -> ( + Canonical<'gcx, InEnvironment<'gcx, Goal<'gcx>>>, + UniverseMap, + ) { + (value.clone(), UniverseMap) + } + + fn invert_goal( + &mut self, + _value: &InEnvironment<'tcx, Goal<'tcx>>, + ) -> Option>> { + panic!("goal inversion not yet implemented") + } + + fn unify_parameters( + &mut self, + environment: &Environment<'tcx>, + a: &Kind<'tcx>, + b: &Kind<'tcx>, + ) -> Fallible> { + self.infcx.commit_if_ok(|_| { + unify(self.infcx, *environment, a, b).map_err(|_| NoSolution) + }) + } + + fn sink_answer_subset( + &self, + value: &Canonical<'gcx, ConstrainedSubst<'gcx>>, + ) -> Canonical<'tcx, ConstrainedSubst<'tcx>> { + value.clone() + } + + fn lift_delayed_literal( + &self, + _value: DelayedLiteral>, + ) -> DelayedLiteral> { + panic!("lift") + } + + fn into_ex_clause( + &mut self, + result: UnificationResult<'tcx>, + ex_clause: &mut ChalkExClause<'tcx> + ) { + into_ex_clause(result, ex_clause); + } +} + +crate fn into_ex_clause(result: UnificationResult<'tcx>, ex_clause: &mut ChalkExClause<'tcx>) { + ex_clause.subgoals.extend( + result.goals.into_iter().map(Literal::Positive) + ); + ex_clause.constraints.extend(result.constraints); +} + +type ChalkHhGoal<'tcx> = HhGoal>; + +type ChalkExClause<'tcx> = ExClause>; + +impl Debug for ChalkContext<'cx, 'gcx> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "ChalkContext") + } +} + +impl Debug for ChalkInferenceContext<'cx, 'gcx, 'tcx> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "ChalkInferenceContext") + } +} + +impl ExClauseLift<'gcx> for ChalkArenas<'a> { + type LiftedExClause = ChalkExClause<'gcx>; + + fn lift_ex_clause_to_tcx( + _ex_clause: &ChalkExClause<'a>, + _tcx: TyCtxt<'_, '_, 'tcx>, + ) -> Option { + panic!() + } +} + +impl ExClauseFold<'tcx> for ChalkArenas<'tcx> { + fn fold_ex_clause_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>( + ex_clause: &ChalkExClause<'tcx>, + folder: &mut F, + ) -> ChalkExClause<'tcx> { + ExClause { 
+ subst: ex_clause.subst.fold_with(folder), + delayed_literals: ex_clause.delayed_literals.fold_with(folder), + constraints: ex_clause.constraints.fold_with(folder), + subgoals: ex_clause.subgoals.fold_with(folder), + } + } + + fn visit_ex_clause_with<'gcx: 'tcx, V: TypeVisitor<'tcx>>( + ex_clause: &ExClause, + visitor: &mut V, + ) -> bool { + let ExClause { + subst, + delayed_literals, + constraints, + subgoals, + } = ex_clause; + subst.visit_with(visitor) + && delayed_literals.visit_with(visitor) + && constraints.visit_with(visitor) + && subgoals.visit_with(visitor) + } +} + +BraceStructLiftImpl! { + impl<'a, 'tcx> Lift<'tcx> for ConstrainedSubst<'a> { + type Lifted = ConstrainedSubst<'tcx>; + + subst, constraints + } +} + +trait Upcast<'tcx, 'gcx: 'tcx>: 'gcx { + type Upcasted: 'tcx; + + fn upcast(&self) -> Self::Upcasted; +} + +impl<'tcx, 'gcx: 'tcx> Upcast<'tcx, 'gcx> for DelayedLiteral> { + type Upcasted = DelayedLiteral>; + + fn upcast(&self) -> Self::Upcasted { + match self { + &DelayedLiteral::CannotProve(..) => DelayedLiteral::CannotProve(()), + &DelayedLiteral::Negative(index) => DelayedLiteral::Negative(index), + DelayedLiteral::Positive(index, subst) => DelayedLiteral::Positive( + *index, + subst.clone() + ), + } + } +} + +impl<'tcx, 'gcx: 'tcx> Upcast<'tcx, 'gcx> for Literal> { + type Upcasted = Literal>; + + fn upcast(&self) -> Self::Upcasted { + match self { + &Literal::Negative(goal) => Literal::Negative(goal), + &Literal::Positive(goal) => Literal::Positive(goal), + } + } +} + +impl<'tcx, 'gcx: 'tcx> Upcast<'tcx, 'gcx> for ExClause> { + type Upcasted = ExClause>; + + fn upcast(&self) -> Self::Upcasted { + ExClause { + subst: self.subst.clone(), + delayed_literals: self.delayed_literals + .iter() + .map(|l| l.upcast()) + .collect(), + constraints: self.constraints.clone(), + subgoals: self.subgoals + .iter() + .map(|g| g.upcast()) + .collect(), + } + } +} + +impl<'tcx, 'gcx: 'tcx, T> Upcast<'tcx, 'gcx> for Canonical<'gcx, T> + where T: Upcast<'tcx, 'gcx> +{ + type Upcasted = Canonical<'tcx, T::Upcasted>; + + fn upcast(&self) -> Self::Upcasted { + Canonical { + max_universe: self.max_universe, + value: self.value.upcast(), + variables: self.variables, + } + } +} diff --git a/src/librustc_traits/chalk_context/program_clauses.rs b/src/librustc_traits/chalk_context/program_clauses.rs new file mode 100644 index 0000000000..b8670e5e91 --- /dev/null +++ b/src/librustc_traits/chalk_context/program_clauses.rs @@ -0,0 +1,480 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
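The `Upcast` trait defined just above rebuilds values that are generic over the long `'gcx` lifetime into ones usable at the shorter `'tcx` lifetime by cloning or copying their contents. A stripped-down, self-contained model of the same pattern (the `Goal` type here is a stand-in invented for the example, not the rustc type):

```
trait Upcast<'short> {
    type Upcasted: 'short;
    fn upcast(&self) -> Self::Upcasted;
}

#[derive(Clone, Debug, PartialEq)]
struct Goal<'a> {
    text: &'a str,
}

impl<'short, 'long: 'short> Upcast<'short> for Goal<'long> {
    type Upcasted = Goal<'short>;

    fn upcast(&self) -> Goal<'short> {
        // `&'long str` coerces to `&'short str` because `'long: 'short`.
        Goal { text: self.text }
    }
}

fn main() {
    let owner = String::from("Implemented(T: Sized)");
    let long_lived = Goal { text: &owner };

    // Re-borrow the goal at a shorter lifetime, much as the `Upcast` impls
    // above do for canonicalized chalk values.
    let short_lived: Goal<'_> = long_lived.upcast();
    assert_eq!(short_lived, long_lived);
}
```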
+
+use rustc::traits::{
+    WellFormed,
+    FromEnv,
+    DomainGoal,
+    GoalKind,
+    Clause,
+    Clauses,
+    ProgramClause,
+    ProgramClauseCategory,
+    Environment,
+};
+use rustc::ty;
+use rustc::hir;
+use rustc::hir::def_id::DefId;
+use rustc_target::spec::abi;
+use super::ChalkInferenceContext;
+use crate::lowering::Lower;
+use std::iter;
+
+fn assemble_clauses_from_impls<'tcx>(
+    tcx: ty::TyCtxt<'_, '_, 'tcx>,
+    trait_def_id: DefId,
+    clauses: &mut Vec<Clause<'tcx>>
+) {
+    tcx.for_each_impl(trait_def_id, |impl_def_id| {
+        clauses.extend(
+            tcx.program_clauses_for(impl_def_id)
+                .into_iter()
+                .cloned()
+        );
+    });
+}
+
+fn assemble_clauses_from_assoc_ty_values<'tcx>(
+    tcx: ty::TyCtxt<'_, '_, 'tcx>,
+    trait_def_id: DefId,
+    clauses: &mut Vec<Clause<'tcx>>
+) {
+    tcx.for_each_impl(trait_def_id, |impl_def_id| {
+        for def_id in tcx.associated_item_def_ids(impl_def_id).iter() {
+            clauses.extend(
+                tcx.program_clauses_for(*def_id)
+                    .into_iter()
+                    .cloned()
+            );
+        }
+    });
+}
+
+fn program_clauses_for_raw_ptr<'tcx>(tcx: ty::TyCtxt<'_, '_, 'tcx>) -> Clauses<'tcx> {
+    let ty = ty::Bound(
+        ty::INNERMOST,
+        ty::BoundVar::from_u32(0).into()
+    );
+    let ty = tcx.mk_ty(ty);
+
+    let ptr_ty = tcx.mk_ptr(ty::TypeAndMut {
+        ty,
+        mutbl: hir::Mutability::MutImmutable,
+    });
+
+    let wf_clause = ProgramClause {
+        goal: DomainGoal::WellFormed(WellFormed::Ty(ptr_ty)),
+        hypotheses: ty::List::empty(),
+        category: ProgramClauseCategory::WellFormed,
+    };
+    let wf_clause = Clause::ForAll(ty::Binder::bind(wf_clause));
+
+    // `forall<T> { WellFormed(*const T). }`
+    tcx.mk_clauses(iter::once(wf_clause))
+}
+
+fn program_clauses_for_fn_ptr<'tcx>(
+    tcx: ty::TyCtxt<'_, '_, 'tcx>,
+    arity_and_output: usize,
+    variadic: bool,
+    unsafety: hir::Unsafety,
+    abi: abi::Abi
+) -> Clauses<'tcx> {
+    let inputs_and_output = tcx.mk_type_list(
+        (0..arity_and_output).into_iter()
+            .map(|i| ty::BoundVar::from(i))
+            // DebruijnIndex(1) because we are going to inject these in a `PolyFnSig`
+            .map(|var| tcx.mk_ty(ty::Bound(ty::DebruijnIndex::from(1usize), var.into())))
+    );
+
+    let fn_sig = ty::Binder::bind(ty::FnSig {
+        inputs_and_output,
+        variadic,
+        unsafety,
+        abi,
+    });
+    let fn_ptr = tcx.mk_fn_ptr(fn_sig);
+
+    let wf_clause = ProgramClause {
+        goal: DomainGoal::WellFormed(WellFormed::Ty(fn_ptr)),
+        hypotheses: ty::List::empty(),
+        category: ProgramClauseCategory::WellFormed,
+    };
+    let wf_clause = Clause::ForAll(ty::Binder::bind(wf_clause));
+
+    // `forall<T1, ..., Tn+1> { WellFormed(for<> fn(T1, ..., Tn) -> Tn+1). }`
+    // where `n + 1` == `arity_and_output`
+    tcx.mk_clauses(iter::once(wf_clause))
+}
+
+fn program_clauses_for_slice<'tcx>(tcx: ty::TyCtxt<'_, '_, 'tcx>) -> Clauses<'tcx> {
+    let ty = ty::Bound(
+        ty::INNERMOST,
+        ty::BoundVar::from_u32(0).into()
+    );
+    let ty = tcx.mk_ty(ty);
+
+    let slice_ty = tcx.mk_slice(ty);
+
+    let sized_trait = match tcx.lang_items().sized_trait() {
+        Some(def_id) => def_id,
+        None => return ty::List::empty(),
+    };
+    let sized_implemented = ty::TraitRef {
+        def_id: sized_trait,
+        substs: tcx.mk_substs_trait(ty, ty::List::empty()),
+    };
+    let sized_implemented: DomainGoal = ty::TraitPredicate {
+        trait_ref: sized_implemented
+    }.lower();
+
+    let wf_clause = ProgramClause {
+        goal: DomainGoal::WellFormed(WellFormed::Ty(slice_ty)),
+        hypotheses: tcx.mk_goals(
+            iter::once(tcx.mk_goal(GoalKind::DomainGoal(sized_implemented)))
+        ),
+        category: ProgramClauseCategory::WellFormed,
+    };
+    let wf_clause = Clause::ForAll(ty::Binder::bind(wf_clause));
+
+    // `forall<T> { WellFormed([T]) :- Implemented(T: Sized). }`
+    tcx.mk_clauses(iter::once(wf_clause))
+}
+
+fn program_clauses_for_array<'tcx>(
+    tcx: ty::TyCtxt<'_, '_, 'tcx>,
+    length: &'tcx ty::Const<'tcx>
+) -> Clauses<'tcx> {
+    let ty = ty::Bound(
+        ty::INNERMOST,
+        ty::BoundVar::from_u32(0).into()
+    );
+    let ty = tcx.mk_ty(ty);
+
+    let array_ty = tcx.mk_ty(ty::Array(ty, length));
+
+    let sized_trait = match tcx.lang_items().sized_trait() {
+        Some(def_id) => def_id,
+        None => return ty::List::empty(),
+    };
+    let sized_implemented = ty::TraitRef {
+        def_id: sized_trait,
+        substs: tcx.mk_substs_trait(ty, ty::List::empty()),
+    };
+    let sized_implemented: DomainGoal = ty::TraitPredicate {
+        trait_ref: sized_implemented
+    }.lower();
+
+    let wf_clause = ProgramClause {
+        goal: DomainGoal::WellFormed(WellFormed::Ty(array_ty)),
+        hypotheses: tcx.mk_goals(
+            iter::once(tcx.mk_goal(GoalKind::DomainGoal(sized_implemented)))
+        ),
+        category: ProgramClauseCategory::WellFormed,
+    };
+    let wf_clause = Clause::ForAll(ty::Binder::bind(wf_clause));
+
+    // `forall<T> { WellFormed([T; length]) :- Implemented(T: Sized). }`
+    tcx.mk_clauses(iter::once(wf_clause))
+}
+
+fn program_clauses_for_tuple<'tcx>(
+    tcx: ty::TyCtxt<'_, '_, 'tcx>,
+    arity: usize
+) -> Clauses<'tcx> {
+    let type_list = tcx.mk_type_list(
+        (0..arity).into_iter()
+            .map(|i| ty::BoundVar::from(i))
+            .map(|var| tcx.mk_ty(ty::Bound(ty::INNERMOST, var.into())))
+    );
+
+    let tuple_ty = tcx.mk_ty(ty::Tuple(type_list));
+
+    let sized_trait = match tcx.lang_items().sized_trait() {
+        Some(def_id) => def_id,
+        None => return ty::List::empty(),
+    };
+    let sized_implemented = type_list[0..arity - 1].iter()
+        .map(|ty| ty::TraitRef {
+            def_id: sized_trait,
+            substs: tcx.mk_substs_trait(*ty, ty::List::empty()),
+        })
+        .map(|trait_ref| ty::TraitPredicate { trait_ref })
+        .map(|predicate| predicate.lower());
+
+    let wf_clause = ProgramClause {
+        goal: DomainGoal::WellFormed(WellFormed::Ty(tuple_ty)),
+        hypotheses: tcx.mk_goals(
+            sized_implemented.map(|domain_goal| {
+                tcx.mk_goal(GoalKind::DomainGoal(domain_goal))
+            })
+        ),
+        category: ProgramClauseCategory::WellFormed,
+    };
+    let wf_clause = Clause::ForAll(ty::Binder::bind(wf_clause));
+
+    // ```
+    // forall<T1, ..., Tn> {
+    //     WellFormed((T1, ..., Tn)) :-
+    //         Implemented(T1: Sized),
+    //         ...
+    //         Implemented(Tn-1: Sized).
+    // }
+    // ```
+    tcx.mk_clauses(iter::once(wf_clause))
+}
+
+fn program_clauses_for_ref<'tcx>(tcx: ty::TyCtxt<'_, '_, 'tcx>) -> Clauses<'tcx> {
+    let region = tcx.mk_region(
+        ty::ReLateBound(ty::INNERMOST, ty::BoundRegion::BrAnon(0))
+    );
+    let ty = tcx.mk_ty(
+        ty::Bound(ty::INNERMOST, ty::BoundVar::from_u32(1).into())
+    );
+
+    let ref_ty = tcx.mk_ref(region, ty::TypeAndMut {
+        ty,
+        mutbl: hir::Mutability::MutImmutable,
+    });
+
+    let outlives: DomainGoal = ty::OutlivesPredicate(ty, region).lower();
+    let wf_clause = ProgramClause {
+        goal: DomainGoal::WellFormed(WellFormed::Ty(ref_ty)),
+        hypotheses: tcx.mk_goals(
+            iter::once(tcx.mk_goal(outlives.into_goal()))
+        ),
+        category: ProgramClauseCategory::ImpliedBound,
+    };
+    let wf_clause = Clause::ForAll(ty::Binder::bind(wf_clause));
+
+    // `forall<'a, T> { WellFormed(&'a T) :- Outlives(T: 'a). }`
+    tcx.mk_clauses(iter::once(wf_clause))
+}
+
+impl ChalkInferenceContext<'cx, 'gcx, 'tcx> {
+    pub(super) fn program_clauses_impl(
+        &self,
+        environment: &Environment<'tcx>,
+        goal: &DomainGoal<'tcx>,
+    ) -> Vec<Clause<'tcx>> {
+        use rustc::traits::WhereClause::*;
+
+        let mut clauses = match goal {
+            DomainGoal::Holds(Implemented(trait_predicate)) => {
+                // These come from:
+                // * implementations of the trait itself (rule `Implemented-From-Impl`)
+                // * the trait decl (rule `Implemented-From-Env`)
+
+                let mut clauses = vec![];
+                assemble_clauses_from_impls(
+                    self.infcx.tcx,
+                    trait_predicate.def_id(),
+                    &mut clauses
+                );
+
+                // FIXME: we need to add special rules for builtin impls:
+                // * `Copy` / `Clone`
+                // * `Sized`
+                // * `Unsize`
+                // * `Generator`
+                // * `FnOnce` / `FnMut` / `Fn`
+                // * trait objects
+                // * auto traits
+
+                // Rule `Implemented-From-Env` will be computed from the environment.
+                clauses
+            }
+
+            DomainGoal::Holds(ProjectionEq(projection_predicate)) => {
+                // These come from:
+                // * the assoc type definition (rule `ProjectionEq-Placeholder`)
+                // * normalization of the assoc ty values (rule `ProjectionEq-Normalize`)
+                // * implied bounds from trait definitions (rule `Implied-Bound-From-Trait`)
+                // * implied bounds from type definitions (rule `Implied-Bound-From-Type`)
+
+                let clauses = self.infcx.tcx.program_clauses_for(
+                    projection_predicate.projection_ty.item_def_id
+                ).into_iter()
+
+                    // only select `ProjectionEq-Placeholder` and `ProjectionEq-Normalize`
+                    .filter(|clause| clause.category() == ProgramClauseCategory::Other)
+
+                    .cloned()
+                    .collect::<Vec<_>>();
+
+                // Rules `Implied-Bound-From-Trait` and `Implied-Bound-From-Type` will be computed
+                // from the environment.
+                clauses
+            }
+
+            DomainGoal::Holds(RegionOutlives(..)) => {
+                // These come from:
+                // * implied bounds from trait definitions (rule `Implied-Bound-From-Trait`)
+                // * implied bounds from type definitions (rule `Implied-Bound-From-Type`)
+
+                // All of these rules are computed in the environment.
+                vec![]
+            }
+
+            DomainGoal::Holds(TypeOutlives(..)) => {
+                // These come from:
+                // * implied bounds from trait definitions (rule `Implied-Bound-From-Trait`)
+                // * implied bounds from type definitions (rule `Implied-Bound-From-Type`)
+
+                // All of these rules are computed in the environment.
+                vec![]
+            }
+
+            DomainGoal::WellFormed(WellFormed::Trait(trait_predicate)) => {
+                // These come from -- the trait decl (rule `WellFormed-TraitRef`).
+                self.infcx.tcx.program_clauses_for(trait_predicate.def_id())
+                    .into_iter()
+
+                    // only select `WellFormed-TraitRef`
+                    .filter(|clause| clause.category() == ProgramClauseCategory::WellFormed)
+
+                    .cloned()
+                    .collect()
+            }
+
+            DomainGoal::WellFormed(WellFormed::Ty(ty)) => {
+                // These come from:
+                // * the associated type definition if `ty` refers to an unnormalized
+                //   associated type (rule `WellFormed-AssocTy`)
+                // * custom rules for built-in types
+                // * the type definition otherwise (rule `WellFormed-Type`)
+                let clauses = match ty.sty {
+                    ty::Projection(data) => {
+                        self.infcx.tcx.program_clauses_for(data.item_def_id)
+                    }
+
+                    // These types are always WF and non-parametric.
+                    ty::Bool |
+                    ty::Char |
+                    ty::Int(..) |
+                    ty::Uint(..) |
+                    ty::Float(..)
| + ty::Str | + ty::Never => { + let wf_clause = ProgramClause { + goal: DomainGoal::WellFormed(WellFormed::Ty(ty)), + hypotheses: ty::List::empty(), + category: ProgramClauseCategory::WellFormed, + }; + let wf_clause = Clause::ForAll(ty::Binder::dummy(wf_clause)); + + self.infcx.tcx.mk_clauses(iter::once(wf_clause)) + } + + // Always WF (recall that we do not check for parameters to be WF). + ty::RawPtr(..) => program_clauses_for_raw_ptr(self.infcx.tcx), + + // Always WF (recall that we do not check for parameters to be WF). + ty::FnPtr(fn_ptr) => { + let fn_ptr = fn_ptr.skip_binder(); + program_clauses_for_fn_ptr( + self.infcx.tcx, + fn_ptr.inputs_and_output.len(), + fn_ptr.variadic, + fn_ptr.unsafety, + fn_ptr.abi + ) + } + + // WF if inner type is `Sized`. + ty::Slice(..) => program_clauses_for_slice(self.infcx.tcx), + + // WF if inner type is `Sized`. + ty::Array(_, length) => program_clauses_for_array(self.infcx.tcx, length), + + // WF if all types but the last one are `Sized`. + ty::Tuple(types) => program_clauses_for_tuple( + self.infcx.tcx, + types.len() + ), + + // WF if `sub_ty` outlives `region`. + ty::Ref(..) => program_clauses_for_ref(self.infcx.tcx), + + ty::Dynamic(..) => { + // FIXME: no rules yet for trait objects + ty::List::empty() + } + + ty::Adt(def, ..) => { + self.infcx.tcx.program_clauses_for(def.did) + } + + ty::Foreign(def_id) | + ty::FnDef(def_id, ..) | + ty::Closure(def_id, ..) | + ty::Generator(def_id, ..) | + ty::Opaque(def_id, ..) => { + self.infcx.tcx.program_clauses_for(def_id) + } + + ty::GeneratorWitness(..) | + ty::Placeholder(..) | + ty::UnnormalizedProjection(..) | + ty::Infer(..) | + ty::Bound(..) | + ty::Param(..) | + ty::Error => { + bug!("unexpected type {:?}", ty) + } + }; + + clauses.into_iter() + .filter(|clause| clause.category() == ProgramClauseCategory::WellFormed) + .cloned() + .collect() + } + + DomainGoal::FromEnv(FromEnv::Trait(..)) => { + // These come from: + // * implied bounds from trait definitions (rule `Implied-Bound-From-Trait`) + // * implied bounds from type definitions (rule `Implied-Bound-From-Type`) + // * implied bounds from assoc type defs (rules `Implied-Trait-From-AssocTy`, + // `Implied-Bound-From-AssocTy` and `Implied-WC-From-AssocTy`) + + // All of these rules are computed in the environment. + vec![] + } + + DomainGoal::FromEnv(FromEnv::Ty(..)) => { + // There are no `FromEnv::Ty(..) :- ...` rules (this predicate only + // comes from the environment). + vec![] + } + + DomainGoal::Normalize(projection_predicate) => { + // These come from -- assoc ty values (rule `Normalize-From-Impl`). 
+                let mut clauses = vec![];
+
+                assemble_clauses_from_assoc_ty_values(
+                    self.infcx.tcx,
+                    projection_predicate.projection_ty.trait_ref(self.infcx.tcx).def_id,
+                    &mut clauses
+                );
+
+                clauses
+            }
+        };
+
+        let environment = self.infcx.tcx.lift_to_global(environment)
+            .expect("environment is not global");
+        clauses.extend(
+            self.infcx.tcx.program_clauses_for_env(environment)
+                .into_iter()
+                .cloned()
+        );
+        clauses
+    }
+}
diff --git a/src/librustc_traits/chalk_context/resolvent_ops.rs b/src/librustc_traits/chalk_context/resolvent_ops.rs
new file mode 100644
index 0000000000..df6458a766
--- /dev/null
+++ b/src/librustc_traits/chalk_context/resolvent_ops.rs
@@ -0,0 +1,241 @@
+use chalk_engine::fallible::{Fallible, NoSolution};
+use chalk_engine::{
+    context,
+    Literal,
+    ExClause
+};
+use rustc::infer::{InferCtxt, LateBoundRegionConversionTime};
+use rustc::infer::canonical::{Canonical, CanonicalVarValues};
+use rustc::traits::{
+    DomainGoal,
+    Goal,
+    GoalKind,
+    Clause,
+    ProgramClause,
+    Environment,
+    InEnvironment,
+};
+use rustc::ty::{self, Ty};
+use rustc::ty::subst::Kind;
+use rustc::ty::relate::{Relate, RelateResult, TypeRelation};
+use syntax_pos::DUMMY_SP;
+
+use super::{ChalkInferenceContext, ChalkArenas, ChalkExClause, ConstrainedSubst};
+use super::unify::*;
+
+impl context::ResolventOps<ChalkArenas<'gcx>, ChalkArenas<'tcx>>
+    for ChalkInferenceContext<'cx, 'gcx, 'tcx>
+{
+    fn resolvent_clause(
+        &mut self,
+        environment: &Environment<'tcx>,
+        goal: &DomainGoal<'tcx>,
+        subst: &CanonicalVarValues<'tcx>,
+        clause: &Clause<'tcx>,
+    ) -> Fallible<Canonical<'gcx, ChalkExClause<'gcx>>> {
+        use chalk_engine::context::UnificationOps;
+
+        self.infcx.probe(|_| {
+            let ProgramClause {
+                goal: consequence,
+                hypotheses,
+                ..
+            } = match clause {
+                Clause::Implies(program_clause) => *program_clause,
+                Clause::ForAll(program_clause) => self.infcx.replace_bound_vars_with_fresh_vars(
+                    DUMMY_SP,
+                    LateBoundRegionConversionTime::HigherRankedType,
+                    program_clause
+                ).0,
+            };
+
+            let result = unify(self.infcx, *environment, goal, &consequence)
+                .map_err(|_| NoSolution)?;
+
+            let mut ex_clause = ExClause {
+                subst: subst.clone(),
+                delayed_literals: vec![],
+                constraints: vec![],
+                subgoals: vec![],
+            };
+
+            self.into_ex_clause(result, &mut ex_clause);
+
+            ex_clause.subgoals.extend(
+                hypotheses.iter().map(|g| match g {
+                    GoalKind::Not(g) => Literal::Negative(environment.with(*g)),
+                    g => Literal::Positive(environment.with(*g)),
+                })
+            );
+
+            let canonical_ex_clause = self.canonicalize_ex_clause(&ex_clause);
+            Ok(canonical_ex_clause)
+        })
+    }
+
+    fn apply_answer_subst(
+        &mut self,
+        ex_clause: ChalkExClause<'tcx>,
+        selected_goal: &InEnvironment<'tcx, Goal<'tcx>>,
+        answer_table_goal: &Canonical<'gcx, InEnvironment<'gcx, Goal<'gcx>>>,
+        canonical_answer_subst: &Canonical<'gcx, ConstrainedSubst<'gcx>>,
+    ) -> Fallible<ChalkExClause<'tcx>> {
+        let (answer_subst, _) = self.infcx.instantiate_canonical_with_fresh_inference_vars(
+            DUMMY_SP,
+            canonical_answer_subst
+        );
+
+        let mut substitutor = AnswerSubstitutor {
+            infcx: self.infcx,
+            environment: selected_goal.environment,
+            answer_subst: answer_subst.subst,
+            binder_index: ty::INNERMOST,
+            ex_clause,
+        };
+
+        substitutor.relate(&answer_table_goal.value, &selected_goal)
+            .map_err(|_| NoSolution)?;
+
+        let mut ex_clause = substitutor.ex_clause;
+        ex_clause.constraints.extend(answer_subst.constraints);
+        Ok(ex_clause)
+    }
+}
+
+struct AnswerSubstitutor<'cx, 'gcx: 'tcx, 'tcx: 'cx> {
+    infcx: &'cx InferCtxt<'cx, 'gcx, 'tcx>,
+    environment: Environment<'tcx>,
+    answer_subst: CanonicalVarValues<'tcx>,
+    binder_index: ty::DebruijnIndex,
+    ex_clause: ChalkExClause<'tcx>,
+}
+
+impl AnswerSubstitutor<'cx, 'gcx, 'tcx> {
+    fn unify_free_answer_var(
+        &mut self,
+        answer_var: ty::BoundVar,
+        pending: Kind<'tcx>
+    ) -> RelateResult<'tcx, ()> {
+        let answer_param = &self.answer_subst.var_values[answer_var];
+        let pending = &ty::fold::shift_out_vars(
+            self.infcx.tcx,
+            &pending,
+            self.binder_index.as_u32()
+        );
+
+        super::into_ex_clause(
+            unify(self.infcx, self.environment, answer_param, pending)?,
+            &mut self.ex_clause
+        );
+
+        Ok(())
+    }
+}
+
+impl TypeRelation<'cx, 'gcx, 'tcx> for AnswerSubstitutor<'cx, 'gcx, 'tcx> {
+    fn tcx(&self) -> ty::TyCtxt<'cx, 'gcx, 'tcx> {
+        self.infcx.tcx
+    }
+
+    fn tag(&self) -> &'static str {
+        "chalk_context::answer_substitutor"
+    }
+
+    fn a_is_expected(&self) -> bool {
+        true
+    }
+
+    fn relate_with_variance<T: Relate<'tcx>>(
+        &mut self,
+        _variance: ty::Variance,
+        a: &T,
+        b: &T,
+    ) -> RelateResult<'tcx, T> {
+        // We don't care about variance.
+        self.relate(a, b)
+    }
+
+    fn binders<T: Relate<'tcx>>(
+        &mut self,
+        a: &ty::Binder<T>,
+        b: &ty::Binder<T>,
+    ) -> RelateResult<'tcx, ty::Binder<T>> {
+        self.binder_index.shift_in(1);
+        let result = self.relate(a.skip_binder(), b.skip_binder())?;
+        self.binder_index.shift_out(1);
+        Ok(ty::Binder::bind(result))
+    }
+
+    fn tys(&mut self, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> {
+        let b = self.infcx.shallow_resolve(b);
+
+        if let &ty::Bound(debruijn, bound_ty) = &a.sty {
+            // Free bound var
+            if debruijn == self.binder_index {
+                self.unify_free_answer_var(bound_ty.var, b.into())?;
+                return Ok(b);
+            }
+        }
+
+        match (&a.sty, &b.sty) {
+            (&ty::Bound(a_debruijn, a_bound), &ty::Bound(b_debruijn, b_bound)) => {
+                assert_eq!(a_debruijn, b_debruijn);
+                assert_eq!(a_bound.var, b_bound.var);
+                Ok(a)
+            }
+
+            // Those should have been canonicalized away.
+            (ty::Placeholder(..), _) => {
+                bug!("unexpected placeholder ty in `AnswerSubstitutor`: {:?} ", a);
+            }
+
+            // Everything else should just be a perfect match as well,
+            // and we forbid inference variables.
+ _ => match ty::relate::super_relate_tys(self, a, b) { + Ok(ty) => Ok(ty), + Err(err) => bug!("type mismatch in `AnswerSubstitutor`: {}", err), + } + } + } + + fn regions( + &mut self, + a: ty::Region<'tcx>, + b: ty::Region<'tcx>, + ) -> RelateResult<'tcx, ty::Region<'tcx>> { + let b = match b { + &ty::ReVar(vid) => self.infcx + .borrow_region_constraints() + .opportunistic_resolve_var(self.infcx.tcx, vid), + + other => other, + }; + + if let &ty::ReLateBound(debruijn, bound) = a { + // Free bound region + if debruijn == self.binder_index { + self.unify_free_answer_var(bound.assert_bound_var(), b.into())?; + return Ok(b); + } + } + + match (a, b) { + (&ty::ReLateBound(a_debruijn, a_bound), &ty::ReLateBound(b_debruijn, b_bound)) => { + assert_eq!(a_debruijn, b_debruijn); + assert_eq!(a_bound.assert_bound_var(), b_bound.assert_bound_var()); + } + + (ty::ReStatic, ty::ReStatic) | + (ty::ReErased, ty::ReErased) | + (ty::ReEmpty, ty::ReEmpty) => (), + + (&ty::ReFree(a_free), &ty::ReFree(b_free)) => { + assert_eq!(a_free, b_free); + } + + _ => bug!("unexpected regions in `AnswerSubstitutor`: {:?}, {:?}", a, b), + } + + Ok(a) + } +} diff --git a/src/librustc_traits/chalk_context/unify.rs b/src/librustc_traits/chalk_context/unify.rs new file mode 100644 index 0000000000..3a9c3918d1 --- /dev/null +++ b/src/librustc_traits/chalk_context/unify.rs @@ -0,0 +1,98 @@ +use rustc::infer::nll_relate::{TypeRelating, TypeRelatingDelegate, NormalizationStrategy}; +use rustc::infer::{InferCtxt, RegionVariableOrigin}; +use rustc::traits::{DomainGoal, Goal, Environment, InEnvironment}; +use rustc::ty::relate::{Relate, TypeRelation, RelateResult}; +use rustc::ty; +use syntax_pos::DUMMY_SP; + +crate struct UnificationResult<'tcx> { + crate goals: Vec>>, + crate constraints: Vec>, +} + +crate fn unify<'me, 'gcx, 'tcx, T: Relate<'tcx>>( + infcx: &'me InferCtxt<'me, 'gcx, 'tcx>, + environment: Environment<'tcx>, + a: &T, + b: &T +) -> RelateResult<'tcx, UnificationResult<'tcx>> { + let mut delegate = ChalkTypeRelatingDelegate::new( + infcx, + environment + ); + + TypeRelating::new( + infcx, + &mut delegate, + ty::Variance::Invariant + ).relate(a, b)?; + + Ok(UnificationResult { + goals: delegate.goals, + constraints: delegate.constraints, + }) +} + +struct ChalkTypeRelatingDelegate<'me, 'gcx: 'tcx, 'tcx: 'me> { + infcx: &'me InferCtxt<'me, 'gcx, 'tcx>, + environment: Environment<'tcx>, + goals: Vec>>, + constraints: Vec>, +} + +impl ChalkTypeRelatingDelegate<'me, 'gcx, 'tcx> { + fn new( + infcx: &'me InferCtxt<'me, 'gcx, 'tcx>, + environment: Environment<'tcx>, + ) -> Self { + Self { + infcx, + environment, + goals: Vec::new(), + constraints: Vec::new(), + } + } +} + +impl TypeRelatingDelegate<'tcx> for &mut ChalkTypeRelatingDelegate<'_, '_, 'tcx> { + fn create_next_universe(&mut self) -> ty::UniverseIndex { + self.infcx.create_next_universe() + } + + fn next_existential_region_var(&mut self) -> ty::Region<'tcx> { + self.infcx.next_region_var(RegionVariableOrigin::MiscVariable(DUMMY_SP)) + } + + fn next_placeholder_region( + &mut self, + placeholder: ty::PlaceholderRegion + ) -> ty::Region<'tcx> { + self.infcx.tcx.mk_region(ty::RePlaceholder(placeholder)) + } + + fn generalize_existential(&mut self, universe: ty::UniverseIndex) -> ty::Region<'tcx> { + self.infcx.next_region_var_in_universe( + RegionVariableOrigin::MiscVariable(DUMMY_SP), + universe + ) + } + + fn push_outlives(&mut self, sup: ty::Region<'tcx>, sub: ty::Region<'tcx>) { + self.constraints.push(ty::OutlivesPredicate(sup.into(), sub)); + } + + fn 
push_domain_goal(&mut self, domain_goal: DomainGoal<'tcx>) { + let goal = self.environment.with( + self.infcx.tcx.mk_goal(domain_goal.into_goal()) + ); + self.goals.push(goal); + } + + fn normalization() -> NormalizationStrategy { + NormalizationStrategy::Lazy + } + + fn forbid_inference_vars() -> bool { + false + } +} diff --git a/src/librustc_traits/dropck_outlives.rs b/src/librustc_traits/dropck_outlives.rs index 2ad7ab7c4d..9ab86daf65 100644 --- a/src/librustc_traits/dropck_outlives.rs +++ b/src/librustc_traits/dropck_outlives.rs @@ -274,7 +274,7 @@ fn dtorck_constraint_for_ty<'a, 'gcx, 'tcx>( ty::UnnormalizedProjection(..) => bug!("only used with chalk-engine"), - ty::Infer(..) | ty::Error => { + ty::Placeholder(..) | ty::Bound(..) | ty::Infer(..) | ty::Error => { // By the time this code runs, all type variables ought to // be fully resolved. Err(NoSolution) diff --git a/src/librustc_traits/implied_outlives_bounds.rs b/src/librustc_traits/implied_outlives_bounds.rs index ad0a54e392..7514c2c18e 100644 --- a/src/librustc_traits/implied_outlives_bounds.rs +++ b/src/librustc_traits/implied_outlives_bounds.rs @@ -20,6 +20,7 @@ use rustc::ty::{self, Ty, TyCtxt, TypeFoldable}; use rustc::ty::outlives::Component; use rustc::ty::query::Providers; use rustc::ty::wf; +use smallvec::{SmallVec, smallvec}; use syntax::ast::DUMMY_NODE_ID; use syntax::source_map::DUMMY_SP; use rustc::traits::FulfillmentContext; @@ -108,7 +109,7 @@ fn compute_implied_outlives_bounds<'tcx>( // From the full set of obligations, just filter down to the // region relationships. implied_bounds.extend(obligations.into_iter().flat_map(|obligation| { - assert!(!obligation.has_escaping_regions()); + assert!(!obligation.has_escaping_bound_vars()); match obligation.predicate { ty::Predicate::Trait(..) | ty::Predicate::Subtype(..) | @@ -122,18 +123,19 @@ fn compute_implied_outlives_bounds<'tcx>( vec![] } - ty::Predicate::RegionOutlives(ref data) => match data.no_late_bound_regions() { + ty::Predicate::RegionOutlives(ref data) => match data.no_bound_vars() { None => vec![], Some(ty::OutlivesPredicate(r_a, r_b)) => { vec![OutlivesBound::RegionSubRegion(r_b, r_a)] } }, - ty::Predicate::TypeOutlives(ref data) => match data.no_late_bound_regions() { + ty::Predicate::TypeOutlives(ref data) => match data.no_bound_vars() { None => vec![], Some(ty::OutlivesPredicate(ty_a, r_b)) => { let ty_a = infcx.resolve_type_vars_if_possible(&ty_a); - let components = tcx.outlives_components(ty_a); + let mut components = smallvec![]; + tcx.push_outlives_components(ty_a, &mut components); implied_bounds_from_components(r_b, components) } }, @@ -155,18 +157,18 @@ fn compute_implied_outlives_bounds<'tcx>( /// those relationships. 
fn implied_bounds_from_components( sub_region: ty::Region<'tcx>, - sup_components: Vec>, + sup_components: SmallVec<[Component<'tcx>; 4]>, ) -> Vec> { sup_components .into_iter() - .flat_map(|component| { + .filter_map(|component| { match component { Component::Region(r) => - vec![OutlivesBound::RegionSubRegion(sub_region, r)], + Some(OutlivesBound::RegionSubRegion(sub_region, r)), Component::Param(p) => - vec![OutlivesBound::RegionSubParam(sub_region, p)], + Some(OutlivesBound::RegionSubParam(sub_region, p)), Component::Projection(p) => - vec![OutlivesBound::RegionSubProjection(sub_region, p)], + Some(OutlivesBound::RegionSubProjection(sub_region, p)), Component::EscapingProjection(_) => // If the projection has escaping regions, don't // try to infer any implied bounds even for its @@ -176,9 +178,9 @@ fn implied_bounds_from_components( // idea is that the WAY that the caller proves // that may change in the future and we want to // give ourselves room to get smarter here. - vec![], + None, Component::UnresolvedInferenceVariable(..) => - vec![], + None, } }) .collect() diff --git a/src/librustc_traits/lib.rs b/src/librustc_traits/lib.rs index 733804fb9b..4a3806d6cb 100644 --- a/src/librustc_traits/lib.rs +++ b/src/librustc_traits/lib.rs @@ -23,6 +23,7 @@ extern crate log; #[macro_use] extern crate rustc; extern crate rustc_data_structures; +extern crate rustc_target; extern crate syntax; extern crate syntax_pos; extern crate smallvec; diff --git a/src/librustc_traits/lowering/environment.rs b/src/librustc_traits/lowering/environment.rs index c71898f73e..519b0ac610 100644 --- a/src/librustc_traits/lowering/environment.rs +++ b/src/librustc_traits/lowering/environment.rs @@ -20,6 +20,8 @@ use rustc::traits::{ use rustc::ty::{self, TyCtxt, Ty}; use rustc::hir::def_id::DefId; use rustc_data_structures::fx::FxHashSet; +use super::Lower; +use std::iter; struct ClauseVisitor<'set, 'a, 'tcx: 'a + 'set> { tcx: TyCtxt<'a, 'tcx, 'tcx>, @@ -45,9 +47,32 @@ impl ClauseVisitor<'set, 'a, 'tcx> { ); } - // forall<'a, T> { `Outlives(T, 'a) :- FromEnv(&'a T)` } - ty::Ref(_region, _sub_ty, ..) => { - // FIXME: we'd need bound tys in order to properly write the above rule + // forall<'a, T> { `Outlives(T: 'a) :- FromEnv(&'a T)` } + ty::Ref(..) => { + use rustc::hir; + + let region = self.tcx.mk_region( + ty::ReLateBound(ty::INNERMOST, ty::BoundRegion::BrAnon(0)) + ); + let ty = self.tcx.mk_ty( + ty::Bound(ty::INNERMOST, ty::BoundVar::from_u32(1).into()) + ); + + let ref_ty = self.tcx.mk_ref(region, ty::TypeAndMut { + ty, + mutbl: hir::Mutability::MutImmutable, + }); + let from_env = DomainGoal::FromEnv(FromEnv::Ty(ref_ty)); + + let clause = ProgramClause { + goal: ty::OutlivesPredicate(ty, region).lower(), + hypotheses: self.tcx.mk_goals( + iter::once(self.tcx.mk_goal(from_env.into_goal())) + ), + category: ProgramClauseCategory::ImpliedBound, + }; + let clause = Clause::ForAll(ty::Binder::bind(clause)); + self.round.insert(clause); } ty::Dynamic(..) => { @@ -88,11 +113,13 @@ impl ClauseVisitor<'set, 'a, 'tcx> { ty::FnPtr(..) | ty::Tuple(..) | ty::Never | - ty::Param(..) => (), + ty::Infer(..) | + ty::Placeholder(..) | + ty::Bound(..) => (), ty::GeneratorWitness(..) | ty::UnnormalizedProjection(..) | - ty::Infer(..) | + ty::Param(..) 
| ty::Error => { bug!("unexpected type {:?}", ty); } @@ -172,21 +199,28 @@ crate fn program_clauses_for_env<'a, 'tcx>( ); } -crate fn environment<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> Environment<'tcx> { +crate fn environment<'a, 'tcx>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + def_id: DefId +) -> ty::Binder> { use super::{Lower, IntoFromEnvGoal}; use rustc::hir::{Node, TraitItemKind, ImplItemKind, ItemKind, ForeignItemKind}; + use rustc::ty::subst::{Subst, Substs}; // The environment of an impl Trait type is its defining function's environment. if let Some(parent) = ty::is_impl_trait_defn(tcx, def_id) { return environment(tcx, parent); } + let bound_vars = Substs::bound_vars_for_item(tcx, def_id); + // Compute the bounds on `Self` and the type parameters. - let ty::InstantiatedPredicates { predicates } = - tcx.predicates_of(def_id).instantiate_identity(tcx); + let ty::InstantiatedPredicates { predicates } = tcx.predicates_of(def_id) + .instantiate_identity(tcx); let clauses = predicates.into_iter() .map(|predicate| predicate.lower()) + .map(|predicate| predicate.subst(tcx, bound_vars)) .map(|domain_goal| domain_goal.map_bound(|bound| bound.into_from_env_goal())) .map(|domain_goal| domain_goal.map_bound(|bound| bound.into_program_clause())) @@ -227,33 +261,43 @@ crate fn environment<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> En let mut input_tys = FxHashSet::default(); - // In an impl, we assume that the receiver type and all its constituents + // In an impl, we assume that the header trait ref and all its constituents // are well-formed. if is_impl { - let trait_ref = tcx.impl_trait_ref(def_id).expect("not an impl"); - input_tys.extend(trait_ref.self_ty().walk()); + let trait_ref = tcx.impl_trait_ref(def_id) + .expect("not an impl") + .subst(tcx, bound_vars); + + input_tys.extend( + trait_ref.substs.types().flat_map(|ty| ty.walk()) + ); } // In an fn, we assume that the arguments and all their constituents are // well-formed. if is_fn { - let fn_sig = tcx.fn_sig(def_id); + // `skip_binder` because we move region parameters to the root binder, + // restored in the return type of this query + let fn_sig = tcx.fn_sig(def_id).skip_binder().subst(tcx, bound_vars); + input_tys.extend( - // FIXME: `skip_binder` seems ok for now? In a real setting, - // the late bound regions would next be instantiated with things - // in the inference table. - fn_sig.skip_binder().inputs().iter().flat_map(|ty| ty.walk()) + fn_sig.inputs().iter().flat_map(|ty| ty.walk()) ); } let clauses = clauses.chain( input_tys.into_iter() + // Filter out type parameters + .filter(|ty| match ty.sty { + ty::Bound(..) 
=> false, + _ => true, + }) .map(|ty| DomainGoal::FromEnv(FromEnv::Ty(ty))) .map(|domain_goal| domain_goal.into_program_clause()) .map(Clause::Implies) ); - Environment { + ty::Binder::bind(Environment { clauses: tcx.mk_clauses(clauses), - } + }) } diff --git a/src/librustc_traits/lowering/mod.rs b/src/librustc_traits/lowering/mod.rs index 46581397ae..2d8e5b48aa 100644 --- a/src/librustc_traits/lowering/mod.rs +++ b/src/librustc_traits/lowering/mod.rs @@ -28,6 +28,7 @@ use rustc::traits::{ }; use rustc::ty::query::Providers; use rustc::ty::{self, List, TyCtxt}; +use rustc::ty::subst::{Subst, Substs}; use syntax::ast; use std::iter; @@ -112,13 +113,14 @@ impl<'tcx> Lower> for ty::Predicate<'tcx> { Predicate::RegionOutlives(predicate) => predicate.lower(), Predicate::TypeOutlives(predicate) => predicate.lower(), Predicate::Projection(predicate) => predicate.lower(), - Predicate::WellFormed(ty) => { - ty::Binder::dummy(DomainGoal::WellFormed(WellFormed::Ty(*ty))) + + Predicate::WellFormed(..) | + Predicate::ObjectSafe(..) | + Predicate::ClosureKind(..) | + Predicate::Subtype(..) | + Predicate::ConstEvaluatable(..) => { + bug!("unexpected predicate {}", self) } - Predicate::ObjectSafe(..) - | Predicate::ClosureKind(..) - | Predicate::Subtype(..) - | Predicate::ConstEvaluatable(..) => unimplemented!(), } } } @@ -189,9 +191,14 @@ fn program_clauses_for_trait<'a, 'tcx>( // } // ``` + let bound_vars = Substs::bound_vars_for_item(tcx, def_id); + // `Self: Trait` let trait_pred = ty::TraitPredicate { - trait_ref: ty::TraitRef::identity(tcx, def_id), + trait_ref: ty::TraitRef { + def_id, + substs: bound_vars, + }, }; // `Implemented(Self: Trait)` @@ -208,11 +215,13 @@ fn program_clauses_for_trait<'a, 'tcx>( category: ProgramClauseCategory::ImpliedBound, }; - let clauses = iter::once(Clause::ForAll(ty::Binder::dummy(implemented_from_env))); + let implemented_from_env = Clause::ForAll(ty::Binder::bind(implemented_from_env)); - let where_clauses = &tcx.predicates_defined_on(def_id).predicates - .into_iter() + let predicates = &tcx.predicates_defined_on(def_id).predicates; + let where_clauses = &predicates + .iter() .map(|(wc, _)| wc.lower()) + .map(|wc| wc.subst(tcx, bound_vars)) .collect::>(); // Rule Implied-Bound-From-Trait @@ -230,11 +239,22 @@ fn program_clauses_for_trait<'a, 'tcx>( .cloned() // `FromEnv(WC) :- FromEnv(Self: Trait)` - .map(|wc| wc.map_bound(|goal| ProgramClause { - goal: goal.into_from_env_goal(), - hypotheses, - category: ProgramClauseCategory::ImpliedBound, - })) + .map(|wc| { + // we move binders to the left + wc.map_bound(|goal| ProgramClause { + goal: goal.into_from_env_goal(), + + // FIXME: As where clauses can only bind lifetimes for now, + // and that named bound regions have a def-id, it is safe + // to just inject `hypotheses` (which contains named vars bound at index `0`) + // into this binding level. This may change if we ever allow where clauses + // to bind types (e.g. for GATs things), because bound types only use a `BoundVar` + // index (no def-id). 
+ hypotheses, + + category: ProgramClauseCategory::ImpliedBound, + }) + }) .map(Clause::ForAll); // Rule WellFormed-TraitRef @@ -246,28 +266,27 @@ fn program_clauses_for_trait<'a, 'tcx>( // } // ``` - // `Implemented(Self: Trait) && WellFormed(WC)` - let wf_conditions = iter::once(ty::Binder::dummy(trait_pred.lower())) - .chain( - where_clauses - .into_iter() - .map(|wc| wc.map_bound(|goal| goal.into_well_formed_goal())) - ); + // `WellFormed(WC)` + let wf_conditions = where_clauses + .into_iter() + .map(|wc| wc.map_bound(|goal| goal.into_well_formed_goal())); // `WellFormed(Self: Trait) :- Implemented(Self: Trait) && WellFormed(WC)` let wf_clause = ProgramClause { goal: DomainGoal::WellFormed(WellFormed::Trait(trait_pred)), hypotheses: tcx.mk_goals( - wf_conditions.map(|wc| tcx.mk_goal(GoalKind::from_poly_domain_goal(wc, tcx))), + iter::once(tcx.mk_goal(GoalKind::DomainGoal(impl_trait))).chain( + wf_conditions.map(|wc| tcx.mk_goal(GoalKind::from_poly_domain_goal(wc, tcx))) + ) ), category: ProgramClauseCategory::WellFormed, }; - let wf_clause = iter::once(Clause::ForAll(ty::Binder::dummy(wf_clause))); + let wf_clause = Clause::ForAll(ty::Binder::bind(wf_clause)); tcx.mk_clauses( - clauses + iter::once(implemented_from_env) .chain(implied_bound_clauses) - .chain(wf_clause) + .chain(iter::once(wf_clause)) ) } @@ -286,15 +305,21 @@ fn program_clauses_for_impl<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId // } // ``` - let trait_ref = tcx.impl_trait_ref(def_id).expect("not an impl"); + let bound_vars = Substs::bound_vars_for_item(tcx, def_id); + + let trait_ref = tcx.impl_trait_ref(def_id) + .expect("not an impl") + .subst(tcx, bound_vars); // `Implemented(A0: Trait)` let trait_pred = ty::TraitPredicate { trait_ref }.lower(); // `WC` - let where_clauses = tcx.predicates_of(def_id).predicates - .into_iter() - .map(|(wc, _)| wc.lower()); + let predicates = &tcx.predicates_of(def_id).predicates; + let where_clauses = predicates + .iter() + .map(|(wc, _)| wc.lower()) + .map(|wc| wc.subst(tcx, bound_vars)); // `Implemented(A0: Trait) :- WC` let clause = ProgramClause { @@ -305,7 +330,7 @@ fn program_clauses_for_impl<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId ), category: ProgramClauseCategory::Other, }; - tcx.mk_clauses(iter::once(Clause::ForAll(ty::Binder::dummy(clause)))) + tcx.mk_clauses(iter::once(Clause::ForAll(ty::Binder::bind(clause)))) } pub fn program_clauses_for_type_def<'a, 'tcx>( @@ -322,17 +347,20 @@ pub fn program_clauses_for_type_def<'a, 'tcx>( // } // ``` + let bound_vars = Substs::bound_vars_for_item(tcx, def_id); + // `Ty<...>` - let ty = tcx.type_of(def_id); + let ty = tcx.type_of(def_id).subst(tcx, bound_vars); // `WC` let where_clauses = tcx.predicates_of(def_id).predicates - .into_iter() + .iter() .map(|(wc, _)| wc.lower()) + .map(|wc| wc.subst(tcx, bound_vars)) .collect::>(); // `WellFormed(Ty<...>) :- WC1, ..., WCm` - let well_formed = ProgramClause { + let well_formed_clause = ProgramClause { goal: DomainGoal::WellFormed(WellFormed::Ty(ty)), hypotheses: tcx.mk_goals( where_clauses @@ -342,10 +370,9 @@ pub fn program_clauses_for_type_def<'a, 'tcx>( ), category: ProgramClauseCategory::WellFormed, }; + let well_formed_clause = Clause::ForAll(ty::Binder::bind(well_formed_clause)); - let well_formed_clause = iter::once(Clause::ForAll(ty::Binder::dummy(well_formed))); - - // Rule FromEnv-Type + // Rule Implied-Bound-From-Type // // For each where clause `WC`: // ``` @@ -363,22 +390,30 @@ pub fn program_clauses_for_type_def<'a, 'tcx>( .into_iter() // `FromEnv(WC) 
:- FromEnv(Ty<...>)` - .map(|wc| wc.map_bound(|goal| ProgramClause { - goal: goal.into_from_env_goal(), - hypotheses, - category: ProgramClauseCategory::ImpliedBound, - })) + .map(|wc| { + // move the binders to the left + wc.map_bound(|goal| ProgramClause { + goal: goal.into_from_env_goal(), + + // FIXME: we inject `hypotheses` into this binding level, + // which may be incorrect in the future: see the FIXME in + // `program_clauses_for_trait` + hypotheses, + + category: ProgramClauseCategory::ImpliedBound, + }) + }) .map(Clause::ForAll); - tcx.mk_clauses(well_formed_clause.chain(from_env_clauses)) + tcx.mk_clauses(iter::once(well_formed_clause).chain(from_env_clauses)) } pub fn program_clauses_for_associated_type_def<'a, 'tcx>( tcx: TyCtxt<'a, 'tcx, 'tcx>, item_id: DefId, ) -> Clauses<'tcx> { - // Rule ProjectionEq-Skolemize + // Rule ProjectionEq-Placeholder // // ``` // trait Trait { @@ -403,7 +438,12 @@ pub fn program_clauses_for_associated_type_def<'a, 'tcx>( ty::AssociatedItemContainer::TraitContainer(trait_id) => trait_id, _ => bug!("not an trait container"), }; - let trait_ref = ty::TraitRef::identity(tcx, trait_id); + + let trait_bound_vars = Substs::bound_vars_for_item(tcx, trait_id); + let trait_ref = ty::TraitRef { + def_id: trait_id, + substs: trait_bound_vars, + }; let projection_ty = ty::ProjectionTy::from_ref_and_name(tcx, trait_ref, item.ident); let placeholder_ty = tcx.mk_ty(ty::UnnormalizedProjection(projection_ty)); @@ -417,6 +457,7 @@ pub fn program_clauses_for_associated_type_def<'a, 'tcx>( hypotheses: ty::List::empty(), category: ProgramClauseCategory::Other, }; + let projection_eq_clause = Clause::ForAll(ty::Binder::bind(projection_eq_clause)); // Rule WellFormed-AssocTy // ``` @@ -430,11 +471,13 @@ pub fn program_clauses_for_associated_type_def<'a, 'tcx>( let hypothesis = tcx.mk_goal( DomainGoal::Holds(WhereClause::Implemented(trait_predicate)).into_goal() ); + let wf_clause = ProgramClause { goal: DomainGoal::WellFormed(WellFormed::Ty(placeholder_ty)), hypotheses: tcx.mk_goals(iter::once(hypothesis)), category: ProgramClauseCategory::WellFormed, }; + let wf_clause = Clause::ForAll(ty::Binder::bind(wf_clause)); // Rule Implied-Trait-From-AssocTy // ``` @@ -447,16 +490,60 @@ pub fn program_clauses_for_associated_type_def<'a, 'tcx>( let hypothesis = tcx.mk_goal( DomainGoal::FromEnv(FromEnv::Ty(placeholder_ty)).into_goal() ); + let from_env_clause = ProgramClause { goal: DomainGoal::FromEnv(FromEnv::Trait(trait_predicate)), hypotheses: tcx.mk_goals(iter::once(hypothesis)), category: ProgramClauseCategory::ImpliedBound, }; + let from_env_clause = Clause::ForAll(ty::Binder::bind(from_env_clause)); + + // Rule ProjectionEq-Normalize + // + // ProjectionEq can succeed by normalizing: + // ``` + // forall { + // ProjectionEq(>::AssocType = U) :- + // Normalize(>::AssocType -> U) + // } + // ``` + + let offset = tcx.generics_of(trait_id).params + .iter() + .map(|p| p.index) + .max() + .unwrap_or(0); + // Add a new type param after the existing ones (`U` in the comment above). 
+ let ty_var = ty::Bound( + ty::INNERMOST, + ty::BoundVar::from_u32(offset + 1).into() + ); + + // `ProjectionEq(>::AssocType = U)` + let projection = ty::ProjectionPredicate { + projection_ty, + ty: tcx.mk_ty(ty_var), + }; + + // `Normalize(>::AssocType -> U)` + let hypothesis = tcx.mk_goal( + DomainGoal::Normalize(projection).into_goal() + ); + + // ProjectionEq(>::AssocType = U) :- + // Normalize(>::AssocType -> U) + let normalize_clause = ProgramClause { + goal: DomainGoal::Holds(WhereClause::ProjectionEq(projection)), + hypotheses: tcx.mk_goals(iter::once(hypothesis)), + category: ProgramClauseCategory::Other, + }; + let normalize_clause = Clause::ForAll(ty::Binder::bind(normalize_clause)); let clauses = iter::once(projection_eq_clause) .chain(iter::once(wf_clause)) - .chain(iter::once(from_env_clause)); - let clauses = clauses.map(|clause| Clause::ForAll(ty::Binder::dummy(clause))); + .chain(iter::once(from_env_clause)) + .chain(iter::once(normalize_clause)); + tcx.mk_clauses(clauses) } @@ -490,17 +577,18 @@ pub fn program_clauses_for_associated_type_value<'a, 'tcx>( _ => bug!("not an impl container"), }; + let impl_bound_vars = Substs::bound_vars_for_item(tcx, impl_id); + // `A0 as Trait` - let trait_ref = tcx.impl_trait_ref(impl_id).unwrap(); + let trait_ref = tcx.impl_trait_ref(impl_id) + .unwrap() + .subst(tcx, impl_bound_vars); // `T` let ty = tcx.type_of(item_id); // `Implemented(A0: Trait)` - let trait_implemented = ty::Binder::dummy(ty::TraitPredicate { trait_ref }.lower()); - - // `Implemented(A0: Trait)` - let hypotheses = vec![trait_implemented]; + let trait_implemented: DomainGoal = ty::TraitPredicate { trait_ref }.lower(); // `>::AssocType` let projection_ty = ty::ProjectionTy::from_ref_and_name(tcx, trait_ref, item.ident); @@ -509,16 +597,16 @@ pub fn program_clauses_for_associated_type_value<'a, 'tcx>( let normalize_goal = DomainGoal::Normalize(ty::ProjectionPredicate { projection_ty, ty }); // `Normalize(... 
-> T) :- ...` - let clause = ProgramClause { + let normalize_clause = ProgramClause { goal: normalize_goal, hypotheses: tcx.mk_goals( - hypotheses - .into_iter() - .map(|wc| tcx.mk_goal(GoalKind::from_poly_domain_goal(wc, tcx))), + iter::once(tcx.mk_goal(GoalKind::DomainGoal(trait_implemented))) ), category: ProgramClauseCategory::Other, }; - tcx.mk_clauses(iter::once(Clause::ForAll(ty::Binder::dummy(clause)))) + let normalize_clause = Clause::ForAll(ty::Binder::bind(normalize_clause)); + + tcx.mk_clauses(iter::once(normalize_clause)) } pub fn dump_program_clauses<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { @@ -548,7 +636,7 @@ impl<'a, 'tcx> ClauseDumper<'a, 'tcx> { if attr.check_name("rustc_dump_env_program_clauses") { let environment = self.tcx.environment(def_id); - clauses = Some(self.tcx.program_clauses_for_env(environment)); + clauses = Some(self.tcx.program_clauses_for_env(*environment.skip_binder())); } if let Some(clauses) = clauses { @@ -559,14 +647,7 @@ impl<'a, 'tcx> ClauseDumper<'a, 'tcx> { let mut strings: Vec<_> = clauses .iter() - .map(|clause| { - // Skip the top-level binder for a less verbose output - let program_clause = match clause { - Clause::Implies(program_clause) => program_clause, - Clause::ForAll(program_clause) => program_clause.skip_binder(), - }; - program_clause.to_string() - }) + .map(|clause| clause.to_string()) .collect(); strings.sort(); diff --git a/src/librustc_tsan/Cargo.toml b/src/librustc_tsan/Cargo.toml index 8bb67c0bba..baadb64511 100644 --- a/src/librustc_tsan/Cargo.toml +++ b/src/librustc_tsan/Cargo.toml @@ -15,6 +15,5 @@ cmake = "0.1.18" [dependencies] alloc = { path = "../liballoc" } -alloc_system = { path = "../liballoc_system" } core = { path = "../libcore" } compiler_builtins = { path = "../rustc/compiler_builtins_shim" } diff --git a/src/librustc_tsan/lib.rs b/src/librustc_tsan/lib.rs index 7b845e631f..47f917e40c 100644 --- a/src/librustc_tsan/lib.rs +++ b/src/librustc_tsan/lib.rs @@ -9,7 +9,6 @@ // except according to those terms. #![sanitizer_runtime] -#![feature(alloc_system)] #![feature(nll)] #![feature(sanitizer_runtime)] #![feature(staged_api)] @@ -17,10 +16,3 @@ #![unstable(feature = "sanitizer_runtime_lib", reason = "internal implementation detail of sanitizers", issue = "0")] - -extern crate alloc_system; - -use alloc_system::System; - -#[global_allocator] -static ALLOC: System = System; diff --git a/src/librustc_typeck/README.md b/src/librustc_typeck/README.md index f00597cb27..fdcbd93552 100644 --- a/src/librustc_typeck/README.md +++ b/src/librustc_typeck/README.md @@ -1,5 +1,5 @@ For high-level intro to how type checking works in rustc, see the [type checking] chapter of the [rustc guide]. -[type checking]: https://rust-lang-nursery.github.io/rustc-guide/type-checking.html -[rustc guide]: https://rust-lang-nursery.github.io/rustc-guide/ +[type checking]: https://rust-lang.github.io/rustc-guide/type-checking.html +[rustc guide]: https://rust-lang.github.io/rustc-guide/ diff --git a/src/librustc_typeck/astconv.rs b/src/librustc_typeck/astconv.rs index 1978f86545..4fbbe58445 100644 --- a/src/librustc_typeck/astconv.rs +++ b/src/librustc_typeck/astconv.rs @@ -8,7 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -//! Conversion from AST representation of types to the ty.rs +//! Conversion from AST representation of types to the `ty.rs` //! representation. The main routine here is `ast_ty_to_ty()`: each use //! is parameterized by an instance of `AstConv`. 
@@ -24,6 +24,7 @@ use rustc::traits; use rustc::ty::{self, Ty, TyCtxt, ToPredicate, TypeFoldable}; use rustc::ty::{GenericParamDef, GenericParamDefKind}; use rustc::ty::wf::object_region_bounds; +use rustc_data_structures::sync::Lrc; use rustc_target::spec::abi; use std::collections::BTreeSet; use std::slice; @@ -35,9 +36,10 @@ use lint; use std::iter; use syntax::ast; -use syntax::ptr::P; use syntax::feature_gate::{GateIssue, emit_feature_err}; -use syntax_pos::{Span, MultiSpan}; +use syntax::ptr::P; +use syntax::util::lev_distance::find_best_match_for_name; +use syntax_pos::{DUMMY_SP, Span, MultiSpan}; pub trait AstConv<'gcx, 'tcx> { fn tcx<'a>(&'a self) -> TyCtxt<'a, 'gcx, 'tcx>; @@ -45,7 +47,7 @@ pub trait AstConv<'gcx, 'tcx> { /// Returns the set of bounds in scope for the type parameter with /// the given id. fn get_type_parameter_bounds(&self, span: Span, def_id: DefId) - -> ty::GenericPredicates<'tcx>; + -> Lrc>; /// What lifetime should we use when a lifetime is omitted (and not elided)? fn re_infer(&self, span: Span, _def: Option<&ty::GenericParamDef>) @@ -181,8 +183,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> dyn AstConv<'gcx, 'tcx>+'o { item_segment: &hir::PathSegment) -> &'tcx Substs<'tcx> { - - let (substs, assoc_bindings) = item_segment.with_generic_args(|generic_args| { + let (substs, assoc_bindings, _) = item_segment.with_generic_args(|generic_args| { self.create_substs_for_ast_path( span, def_id, @@ -256,7 +257,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> dyn AstConv<'gcx, 'tcx>+'o { }, def.parent.is_none() && def.has_self, // `has_self` seg.infer_types || suppress_mismatch, // `infer_types` - ) + ).0 } /// Check that the correct number of generic arguments have been provided. @@ -269,7 +270,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> dyn AstConv<'gcx, 'tcx>+'o { position: GenericArgPosition, has_self: bool, infer_types: bool, - ) -> bool { + ) -> (bool, Option>) { // At this stage we are guaranteed that the generic arguments are in the correct order, e.g. // that lifetimes will proceed types. So it suffices to check the number of each generic // arguments in order to validate them with respect to the generic parameters. @@ -303,13 +304,13 @@ impl<'o, 'gcx: 'tcx, 'tcx> dyn AstConv<'gcx, 'tcx>+'o { let mut err = tcx.sess.struct_span_err(span, msg); err.span_note(span_late, note); err.emit(); - return true; + return (true, None); } else { let mut multispan = MultiSpan::from_span(span); multispan.push_span_label(span_late, note.to_string()); tcx.lint_node(lint::builtin::LATE_BOUND_LIFETIME_ARGUMENTS, args.args[0].id(), multispan, msg); - return false; + return (false, None); } } } @@ -323,7 +324,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> dyn AstConv<'gcx, 'tcx>+'o { // For kinds without defaults (i.e. lifetimes), `required == permitted`. // For other kinds (i.e. types), `permitted` may be greater than `required`. if required <= provided && provided <= permitted { - return false; + return (false, None); } // Unfortunately lifetime and type parameter mismatches are typically styled @@ -338,33 +339,28 @@ impl<'o, 'gcx: 'tcx, 'tcx> dyn AstConv<'gcx, 'tcx>+'o { (required, "") }; - let mut span = span; - let label = if required == permitted && provided > permitted { - let diff = provided - permitted; - if diff == 1 { - // In the case when the user has provided too many arguments, - // we want to point to the first unexpected argument. 
- let first_superfluous_arg: &GenericArg = &args.args[offset + permitted]; - span = first_superfluous_arg.span(); - } - format!( - "{}unexpected {} argument{}", - if diff != 1 { format!("{} ", diff) } else { String::new() }, - kind, - if diff != 1 { "s" } else { "" }, - ) + let mut potential_assoc_types: Option> = None; + let (spans, label) = if required == permitted && provided > permitted { + // In the case when the user has provided too many arguments, + // we want to point to the unexpected arguments. + let spans: Vec = args.args[offset+permitted .. offset+provided] + .iter() + .map(|arg| arg.span()) + .collect(); + potential_assoc_types = Some(spans.clone()); + (spans, format!( "unexpected {} argument", kind)) } else { - format!( + (vec![span], format!( "expected {}{} {} argument{}", quantifier, bound, kind, if bound != 1 { "s" } else { "" }, - ) + )) }; - tcx.sess.struct_span_err_with_code( - span, + let mut err = tcx.sess.struct_span_err_with_code( + spans.clone(), &format!( "wrong number of {} arguments: expected {}{}, found {}", kind, @@ -373,9 +369,14 @@ impl<'o, 'gcx: 'tcx, 'tcx> dyn AstConv<'gcx, 'tcx>+'o { provided, ), DiagnosticId::Error("E0107".into()) - ).span_label(span, label).emit(); + ); + for span in spans { + err.span_label(span, label.as_str()); + } + err.emit(); - provided > required // `suppress_error` + (provided > required, // `suppress_error` + potential_assoc_types) }; if !infer_lifetimes || arg_counts.lifetimes > param_counts.lifetimes { @@ -397,7 +398,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> dyn AstConv<'gcx, 'tcx>+'o { arg_counts.lifetimes, ) } else { - false + (false, None) } } @@ -452,7 +453,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> dyn AstConv<'gcx, 'tcx>+'o { } // We manually build up the substitution, rather than using convenience - // methods in subst.rs so that we can iterate over the arguments and + // methods in `subst.rs` so that we can iterate over the arguments and // parameters in lock-step linearly, rather than trying to match each pair. let mut substs: SmallVec<[Kind<'tcx>; 8]> = SmallVec::with_capacity(count); @@ -470,7 +471,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> dyn AstConv<'gcx, 'tcx>+'o { } } - // (Unless it's been handled in `parent_substs`) `Self` is handled first. + // `Self` is handled first, unless it's been handled in `parent_substs`. if has_self { if let Some(¶m) = params.peek() { if param.index == 0 { @@ -545,7 +546,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> dyn AstConv<'gcx, 'tcx>+'o { } /// Given the type/region arguments provided to some path (along with - /// an implicit Self, if this is a trait reference) returns the complete + /// an implicit `Self`, if this is a trait reference) returns the complete /// set of substitutions. This may involve applying defaulted type parameters. /// /// Note that the type listing given here is *exactly* what the user provided. 
@@ -555,7 +556,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> dyn AstConv<'gcx, 'tcx>+'o { generic_args: &hir::GenericArgs, infer_types: bool, self_ty: Option>) - -> (&'tcx Substs<'tcx>, Vec>) + -> (&'tcx Substs<'tcx>, Vec>, Option>) { // If the type is parameterized by this region, then replace this // region with the current anon region binding (in other words, @@ -571,7 +572,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> dyn AstConv<'gcx, 'tcx>+'o { assert_eq!(generic_params.has_self, self_ty.is_some()); let has_self = generic_params.has_self; - Self::check_generic_arg_count( + let (_, potential_assoc_types) = Self::check_generic_arg_count( self.tcx(), span, &generic_params, @@ -676,7 +677,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> dyn AstConv<'gcx, 'tcx>+'o { debug!("create_substs_for_ast_path(generic_params={:?}, self_ty={:?}) -> {:?}", generic_params, self_ty, substs); - (substs, assoc_bindings) + (substs, assoc_bindings, potential_assoc_types) } /// Instantiates the path for the given trait reference, assuming that it's @@ -699,7 +700,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> dyn AstConv<'gcx, 'tcx>+'o { trait_ref.path.segments.last().unwrap()) } - /// Get the DefId of the given trait ref. It _must_ actually be a trait. + /// Get the `DefId` of the given trait ref. It _must_ actually be a trait. fn trait_def_id(&self, trait_ref: &hir::TraitRef) -> DefId { let path = &trait_ref.path; match path.def { @@ -712,25 +713,26 @@ impl<'o, 'gcx: 'tcx, 'tcx> dyn AstConv<'gcx, 'tcx>+'o { } } - /// The given `trait_ref` must actually be trait. + /// The given trait ref must actually be a trait. pub(super) fn instantiate_poly_trait_ref_inner(&self, trait_ref: &hir::TraitRef, self_ty: Ty<'tcx>, poly_projections: &mut Vec<(ty::PolyProjectionPredicate<'tcx>, Span)>, speculative: bool) - -> ty::PolyTraitRef<'tcx> + -> (ty::PolyTraitRef<'tcx>, Option>) { let trait_def_id = self.trait_def_id(trait_ref); - debug!("ast_path_to_poly_trait_ref({:?}, def_id={:?})", trait_ref, trait_def_id); + debug!("instantiate_poly_trait_ref({:?}, def_id={:?})", trait_ref, trait_def_id); self.prohibit_generics(trait_ref.path.segments.split_last().unwrap().1); - let (substs, assoc_bindings) = - self.create_substs_for_ast_trait_ref(trait_ref.path.span, - trait_def_id, - self_ty, - trait_ref.path.segments.last().unwrap()); + let (substs, assoc_bindings, potential_assoc_types) = self.create_substs_for_ast_trait_ref( + trait_ref.path.span, + trait_def_id, + self_ty, + trait_ref.path.segments.last().unwrap(), + ); let poly_trait_ref = ty::Binder::bind(ty::TraitRef::new(trait_def_id, substs)); let mut dup_bindings = FxHashMap::default(); @@ -739,20 +741,20 @@ impl<'o, 'gcx: 'tcx, 'tcx> dyn AstConv<'gcx, 'tcx>+'o { let predicate: Result<_, ErrorReported> = self.ast_type_binding_to_poly_projection_predicate( trait_ref.ref_id, poly_trait_ref, binding, speculative, &mut dup_bindings); - // ok to ignore Err() because ErrorReported (see above) + // okay to ignore Err because of ErrorReported (see above) Some((predicate.ok()?, binding.span)) })); - debug!("ast_path_to_poly_trait_ref({:?}, projections={:?}) -> {:?}", + debug!("instantiate_poly_trait_ref({:?}, projections={:?}) -> {:?}", trait_ref, poly_projections, poly_trait_ref); - poly_trait_ref + (poly_trait_ref, potential_assoc_types) } pub fn instantiate_poly_trait_ref(&self, poly_trait_ref: &hir::PolyTraitRef, self_ty: Ty<'tcx>, poly_projections: &mut Vec<(ty::PolyProjectionPredicate<'tcx>, Span)>) - -> ty::PolyTraitRef<'tcx> + -> (ty::PolyTraitRef<'tcx>, Option>) { self.instantiate_poly_trait_ref_inner(&poly_trait_ref.trait_ref, 
self_ty, poly_projections, false) @@ -765,7 +767,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> dyn AstConv<'gcx, 'tcx>+'o { trait_segment: &hir::PathSegment) -> ty::TraitRef<'tcx> { - let (substs, assoc_bindings) = + let (substs, assoc_bindings, _) = self.create_substs_for_ast_trait_ref(span, trait_def_id, self_ty, @@ -774,13 +776,13 @@ impl<'o, 'gcx: 'tcx, 'tcx> dyn AstConv<'gcx, 'tcx>+'o { ty::TraitRef::new(trait_def_id, substs) } - fn create_substs_for_ast_trait_ref(&self, - span: Span, - trait_def_id: DefId, - self_ty: Ty<'tcx>, - trait_segment: &hir::PathSegment) - -> (&'tcx Substs<'tcx>, Vec>) - { + fn create_substs_for_ast_trait_ref( + &self, + span: Span, + trait_def_id: DefId, + self_ty: Ty<'tcx>, + trait_segment: &hir::PathSegment, + ) -> (&'tcx Substs<'tcx>, Vec>, Option>) { debug!("create_substs_for_ast_trait_ref(trait_segment={:?})", trait_segment); @@ -832,7 +834,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> dyn AstConv<'gcx, 'tcx>+'o { let tcx = self.tcx(); if !speculative { - // Given something like `U : SomeTrait`, we want to produce a + // Given something like `U: SomeTrait`, we want to produce a // predicate like `::T = X`. This is somewhat // subtle in the event that `T` is defined in a supertrait of // `SomeTrait`, because in that case we need to upcast. @@ -840,7 +842,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> dyn AstConv<'gcx, 'tcx>+'o { // That is, consider this case: // // ``` - // trait SubTrait : SuperTrait { } + // trait SubTrait: SuperTrait { } // trait SuperTrait { type T; } // // ... B : SubTrait ... @@ -909,16 +911,14 @@ impl<'o, 'gcx: 'tcx, 'tcx> dyn AstConv<'gcx, 'tcx>+'o { if !speculative { dup_bindings.entry(assoc_ty.def_id) .and_modify(|prev_span| { - let mut err = self.tcx().struct_span_lint_node( - ::rustc::lint::builtin::DUPLICATE_ASSOCIATED_TYPE_BINDINGS, - ref_id, - binding.span, - &format!("associated type binding `{}` specified more than once", - binding.item_name) - ); - err.span_label(binding.span, "used more than once"); - err.span_label(*prev_span, format!("first use of `{}`", binding.item_name)); - err.emit(); + struct_span_err!(self.tcx().sess, binding.span, E0719, + "the value of the associated type `{}` (from the trait `{}`) \ + is already specified", + binding.item_name, + tcx.item_path_str(assoc_ty.container.id())) + .span_label(binding.span, "re-bound here") + .span_label(*prev_span, format!("`{}` bound here first", binding.item_name)) + .emit(); }) .or_insert(binding.span); } @@ -948,8 +948,8 @@ impl<'o, 'gcx: 'tcx, 'tcx> dyn AstConv<'gcx, 'tcx>+'o { ) } - /// Transform a PolyTraitRef into a PolyExistentialTraitRef by - /// removing the dummy Self type (TRAIT_OBJECT_DUMMY_SELF). + /// Transform a `PolyTraitRef` into a `PolyExistentialTraitRef` by + /// removing the dummy `Self` type (`TRAIT_OBJECT_DUMMY_SELF`). 
fn trait_ref_to_existential(&self, trait_ref: ty::TraitRef<'tcx>) -> ty::ExistentialTraitRef<'tcx> { assert_eq!(trait_ref.self_ty().sty, TRAIT_OBJECT_DUMMY_SELF); @@ -970,14 +970,17 @@ impl<'o, 'gcx: 'tcx, 'tcx> dyn AstConv<'gcx, 'tcx>+'o { return tcx.types.err; } - let mut projection_bounds = vec![]; + let mut projection_bounds = Vec::new(); let dummy_self = tcx.mk_ty(TRAIT_OBJECT_DUMMY_SELF); - let principal = self.instantiate_poly_trait_ref(&trait_bounds[0], - dummy_self, - &mut projection_bounds); + let (principal, potential_assoc_types) = self.instantiate_poly_trait_ref( + &trait_bounds[0], + dummy_self, + &mut projection_bounds, + ); + debug!("principal: {:?}", principal); for trait_bound in trait_bounds[1..].iter() { - // Sanity check for non-principal trait bounds + // sanity check for non-principal trait bounds self.instantiate_poly_trait_ref(trait_bound, dummy_self, &mut vec![]); @@ -994,7 +997,111 @@ impl<'o, 'gcx: 'tcx, 'tcx> dyn AstConv<'gcx, 'tcx>+'o { .emit(); } - // Erase the dummy_self (TRAIT_OBJECT_DUMMY_SELF) used above. + // Check that there are no gross object safety violations; + // most importantly, that the supertraits don't contain `Self`, + // to avoid ICEs. + let object_safety_violations = + tcx.global_tcx().astconv_object_safety_violations(principal.def_id()); + if !object_safety_violations.is_empty() { + tcx.report_object_safety_error( + span, principal.def_id(), object_safety_violations) + .emit(); + return tcx.types.err; + } + + // Use a `BTreeSet` to keep output in a more consistent order. + let mut associated_types = BTreeSet::default(); + + for tr in traits::elaborate_trait_ref(tcx, principal) { + match tr { + ty::Predicate::Trait(pred) => { + associated_types.extend(tcx.associated_items(pred.def_id()) + .filter(|item| item.kind == ty::AssociatedKind::Type) + .map(|item| item.def_id)); + } + ty::Predicate::Projection(pred) => { + // Include projections defined on supertraits. + projection_bounds.push((pred, DUMMY_SP)) + } + _ => () + } + } + + for (projection_bound, _) in &projection_bounds { + associated_types.remove(&projection_bound.projection_def_id()); + } + + if !associated_types.is_empty() { + let names = associated_types.iter().map(|item_def_id| { + let assoc_item = tcx.associated_item(*item_def_id); + let trait_def_id = assoc_item.container.id(); + format!( + "`{}` (from the trait `{}`)", + assoc_item.ident, + tcx.item_path_str(trait_def_id), + ) + }).collect::>().join(", "); + let mut err = struct_span_err!( + tcx.sess, + span, + E0191, + "the value of the associated type{} {} must be specified", + if associated_types.len() == 1 { "" } else { "s" }, + names, + ); + let mut suggest = false; + let mut potential_assoc_types_spans = vec![]; + if let Some(potential_assoc_types) = potential_assoc_types { + if potential_assoc_types.len() == associated_types.len() { + // Only suggest when the amount of missing associated types is equals to the + // extra type arguments present, as that gives us a relatively high confidence + // that the user forgot to give the associtated type's name. The canonical + // example would be trying to use `Iterator` instead of + // `Iterator`. 
+ suggest = true; + potential_assoc_types_spans = potential_assoc_types; + } + } + let mut suggestions = vec![]; + for (i, item_def_id) in associated_types.iter().enumerate() { + let assoc_item = tcx.associated_item(*item_def_id); + err.span_label( + span, + format!("associated type `{}` must be specified", assoc_item.ident), + ); + if item_def_id.is_local() { + err.span_label( + tcx.def_span(*item_def_id), + format!("`{}` defined here", assoc_item.ident), + ); + } + if suggest { + if let Ok(snippet) = tcx.sess.source_map().span_to_snippet( + potential_assoc_types_spans[i], + ) { + suggestions.push(( + potential_assoc_types_spans[i], + format!("{} = {}", assoc_item.ident, snippet), + )); + } + } + } + if !suggestions.is_empty() { + let msg = if suggestions.len() == 1 { + "if you meant to specify the associated type, write" + } else { + "if you meant to specify the associated types, write" + }; + err.multipart_suggestion_with_applicability( + msg, + suggestions, + Applicability::MaybeIncorrect, + ); + } + err.emit(); + } + + // Erase the `dummy_self` (`TRAIT_OBJECT_DUMMY_SELF`) used above. let existential_principal = principal.map_bound(|trait_ref| { self.trait_ref_to_existential(trait_ref) }); @@ -1009,48 +1116,11 @@ impl<'o, 'gcx: 'tcx, 'tcx> dyn AstConv<'gcx, 'tcx>+'o { }) }); - // check that there are no gross object safety violations, - // most importantly, that the supertraits don't contain Self, - // to avoid ICE-s. - let object_safety_violations = - tcx.astconv_object_safety_violations(principal.def_id()); - if !object_safety_violations.is_empty() { - tcx.report_object_safety_error( - span, principal.def_id(), object_safety_violations) - .emit(); - return tcx.types.err; - } - - // use a btreeset to keep output in a more consistent order - let mut associated_types = BTreeSet::default(); - - for tr in traits::supertraits(tcx, principal) { - associated_types.extend(tcx.associated_items(tr.def_id()) - .filter(|item| item.kind == ty::AssociatedKind::Type) - .map(|item| item.def_id)); - } - - for (projection_bound, _) in &projection_bounds { - associated_types.remove(&projection_bound.projection_def_id()); - } - - for item_def_id in associated_types { - let assoc_item = tcx.associated_item(item_def_id); - let trait_def_id = assoc_item.container.id(); - struct_span_err!(tcx.sess, span, E0191, "the value of the associated type `{}` \ - (from the trait `{}`) must be specified", - assoc_item.ident, - tcx.item_path_str(trait_def_id)) - .span_label(span, format!("missing associated type `{}` value", - assoc_item.ident)) - .emit(); - } - // Dedup auto traits so that `dyn Trait + Send + Send` is the same as `dyn Trait + Send`. auto_traits.sort(); auto_traits.dedup(); - // skip_binder is okay, because the predicates are re-bound. + // Calling `skip_binder` is okay, because the predicates are re-bound. let mut v = iter::once(ty::ExistentialPredicate::Trait(*existential_principal.skip_binder())) .chain(auto_traits.into_iter().map(ty::ExistentialPredicate::AutoTrait)) @@ -1060,7 +1130,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> dyn AstConv<'gcx, 'tcx>+'o { v.sort_by(|a, b| a.stable_cmp(tcx, b)); let existential_predicates = ty::Binder::bind(tcx.mk_existential_predicates(v.into_iter())); - // Explicitly specified region bound. Use that. + // Use explicitly-specified region bound. 
let region_bound = if !lifetime.is_elided() { self.ast_region_to_region(lifetime, None) } else { @@ -1112,12 +1182,12 @@ impl<'o, 'gcx: 'tcx, 'tcx> dyn AstConv<'gcx, 'tcx>+'o { { let tcx = self.tcx(); - let bounds: Vec<_> = self.get_type_parameter_bounds(span, ty_param_def_id) - .predicates.into_iter().filter_map(|(p, _)| p.to_opt_poly_trait_ref()).collect(); + let predicates = &self.get_type_parameter_bounds(span, ty_param_def_id).predicates; + let bounds = predicates.iter().filter_map(|(p, _)| p.to_opt_poly_trait_ref()); // Check that there is exactly one way to find an associated type with the // correct name. - let suitable_bounds = traits::transitive_bounds(tcx, &bounds) + let suitable_bounds = traits::transitive_bounds(tcx, bounds) .filter(|b| self.trait_defines_associated_type_named(b.def_id(), assoc_name)); let param_node_id = tcx.hir.as_local_node_id(ty_param_def_id).unwrap(); @@ -1128,8 +1198,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> dyn AstConv<'gcx, 'tcx>+'o { span) } - - // Checks that bounds contains exactly one element and reports appropriate + // Checks that `bounds` contains exactly one element and reports appropriate // errors otherwise. fn one_bound_for_assoc_type(&self, mut bounds: I, @@ -1186,11 +1255,11 @@ impl<'o, 'gcx: 'tcx, 'tcx> dyn AstConv<'gcx, 'tcx>+'o { } // Create a type from a path to an associated type. - // For a path A::B::C::D, ty and ty_path_def are the type and def for A::B::C - // and item_segment is the path segment for D. We return a type and a def for + // For a path `A::B::C::D`, `ty` and `ty_path_def` are the type and def for `A::B::C` + // and item_segment is the path segment for `D`. We return a type and a def for // the whole path. - // Will fail except for T::A and Self::A; i.e., if ty/ty_path_def are not a type - // parameter or Self. + // Will fail except for `T::A` and `Self::A`; i.e., if `ty`/`ty_path_def` are not a type + // parameter or `Self`. pub fn associated_path_def_to_ty(&self, ref_id: ast::NodeId, span: Span, @@ -1210,7 +1279,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> dyn AstConv<'gcx, 'tcx>+'o { // item is declared. let bound = match (&ty.sty, ty_path_def) { (_, Def::SelfTy(Some(_), Some(impl_def_id))) => { - // `Self` in an impl of a trait - we have a concrete self type and a + // `Self` in an impl of a trait - we have a concrete `self` type and a // trait reference. let trait_ref = match tcx.impl_trait_ref(impl_def_id) { Some(trait_ref) => trait_ref, @@ -1235,6 +1304,32 @@ impl<'o, 'gcx: 'tcx, 'tcx> dyn AstConv<'gcx, 'tcx>+'o { Err(ErrorReported) => return (tcx.types.err, Def::Err), } } + (&ty::Adt(adt_def, _substs), Def::Enum(_did)) => { + let ty_str = ty.to_string(); + // Incorrect enum variant + let mut err = tcx.sess.struct_span_err( + span, + &format!("no variant `{}` on enum `{}`", &assoc_name.as_str(), ty_str), + ); + // Check if it was a typo + let input = adt_def.variants.iter().map(|variant| &variant.name); + if let Some(suggested_name) = find_best_match_for_name( + input, + &assoc_name.as_str(), + None, + ) { + err.span_suggestion_with_applicability( + span, + "did you mean", + format!("{}::{}", ty_str, suggested_name.to_string()), + Applicability::MaybeIncorrect, + ); + } else { + err.span_label(span, "unknown variant"); + } + err.emit(); + return (tcx.types.err, Def::Err); + } _ => { // Don't print TyErr to the user. if !ty.references_error() { @@ -1347,7 +1442,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> dyn AstConv<'gcx, 'tcx>+'o { err.span_label(span, "associated type not allowed here").emit(); } - // Check a type Path and convert it to a Ty. 
+ // Check a type `Path` and convert it to a `Ty`. pub fn def_to_ty(&self, opt_self_ty: Option>, path: &hir::Path, @@ -1361,7 +1456,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> dyn AstConv<'gcx, 'tcx>+'o { let span = path.span; match path.def { Def::Existential(did) => { - // check for desugared impl trait + // Check for desugared impl trait. assert!(ty::is_impl_trait_defn(tcx, did).is_none()); let item_segment = path.segments.split_last().unwrap(); self.prohibit_generics(item_segment.1); @@ -1398,7 +1493,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> dyn AstConv<'gcx, 'tcx>+'o { tcx.mk_ty_param(index, tcx.hir.name(node_id).as_interned_str()) } Def::SelfTy(_, Some(def_id)) => { - // Self in impl (we know the concrete type). + // `Self` in impl (we know the concrete type) assert_eq!(opt_self_ty, None); self.prohibit_generics(&path.segments); @@ -1406,7 +1501,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> dyn AstConv<'gcx, 'tcx>+'o { tcx.at(span).type_of(def_id) } Def::SelfTy(Some(_), None) => { - // Self in trait. + // `Self` in trait assert_eq!(opt_self_ty, None); self.prohibit_generics(&path.segments); tcx.mk_self_type() @@ -1442,8 +1537,8 @@ impl<'o, 'gcx: 'tcx, 'tcx> dyn AstConv<'gcx, 'tcx>+'o { /// Parses the programmer's textual representation of a type into our /// internal notion of a type. pub fn ast_ty_to_ty(&self, ast_ty: &hir::Ty) -> Ty<'tcx> { - debug!("ast_ty_to_ty(id={:?}, ast_ty={:?})", - ast_ty.id, ast_ty); + debug!("ast_ty_to_ty(id={:?}, ast_ty={:?} ty_ty={:?})", + ast_ty.id, ast_ty, ast_ty.node); let tcx = self.tcx(); @@ -1748,7 +1843,7 @@ impl<'a, 'gcx, 'tcx> Bounds<'tcx> { self.region_bounds.iter().map(|&(region_bound, span)| { // account for the binder being introduced below; no need to shift `param_ty` // because, at present at least, it can only refer to early-bound regions - let region_bound = tcx.mk_region(ty::fold::shift_region(*region_bound, 1)); + let region_bound = ty::fold::shift_region(tcx, region_bound, 1); let outlives = ty::OutlivesPredicate(param_ty, region_bound); (ty::Binder::dummy(outlives).to_predicate(), span) }).chain( diff --git a/src/librustc_typeck/check/_match.rs b/src/librustc_typeck/check/_match.rs index cd674c05ab..a477df6ae2 100644 --- a/src/librustc_typeck/check/_match.rs +++ b/src/librustc_typeck/check/_match.rs @@ -834,7 +834,7 @@ https://doc.rust-lang.org/reference/types.html#trait-objects"); // Replace constructor type with constructed type for tuple struct patterns. let pat_ty = pat_ty.fn_sig(tcx).output(); - let pat_ty = pat_ty.no_late_bound_regions().expect("expected fn type"); + let pat_ty = pat_ty.no_bound_vars().expect("expected fn type"); self.demand_eqtype(pat.span, expected, pat_ty); diff --git a/src/librustc_typeck/check/autoderef.rs b/src/librustc_typeck/check/autoderef.rs index 73489309d0..2cd2bb5064 100644 --- a/src/librustc_typeck/check/autoderef.rs +++ b/src/librustc_typeck/check/autoderef.rs @@ -59,7 +59,7 @@ impl<'a, 'gcx, 'tcx> Iterator for Autoderef<'a, 'gcx, 'tcx> { if self.steps.len() >= *tcx.sess.recursion_limit.get() { // We've reached the recursion limit, error gracefully. 
let suggested_limit = *tcx.sess.recursion_limit.get() * 2; - let msg = format!("reached the recursion limit while auto-dereferencing {:?}", + let msg = format!("reached the recursion limit while auto-dereferencing `{:?}`", self.cur_ty); let error_id = (DiagnosticMessageId::ErrorId(55), Some(self.span), msg); let fresh = tcx.sess.one_time_diagnostics.borrow_mut().insert(error_id); @@ -67,7 +67,7 @@ impl<'a, 'gcx, 'tcx> Iterator for Autoderef<'a, 'gcx, 'tcx> { struct_span_err!(tcx.sess, self.span, E0055, - "reached the recursion limit while auto-dereferencing {:?}", + "reached the recursion limit while auto-dereferencing `{:?}`", self.cur_ty) .span_label(self.span, "deref recursion limit reached") .help(&format!( diff --git a/src/librustc_typeck/check/callee.rs b/src/librustc_typeck/check/callee.rs index de4293aaae..7a71cf57a2 100644 --- a/src/librustc_typeck/check/callee.rs +++ b/src/librustc_typeck/check/callee.rs @@ -110,10 +110,11 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // fnmut vs fnonce. If so, we have to defer further processing. if self.closure_kind(def_id, substs).is_none() { let closure_ty = self.closure_sig(def_id, substs); - let fn_sig = self.replace_late_bound_regions_with_fresh_var(call_expr.span, - infer::FnCall, - &closure_ty) - .0; + let fn_sig = self.replace_bound_vars_with_fresh_vars( + call_expr.span, + infer::FnCall, + &closure_ty + ).0; let adjustments = autoderef.adjust_steps(Needs::None); self.record_deferred_call_resolution(def_id, DeferredCallResolution { call_expr, @@ -218,35 +219,62 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } } - let mut err = type_error_struct!( - self.tcx.sess, - call_expr.span, - callee_ty, - E0618, - "expected function, found {}", - match unit_variant { - Some(ref path) => format!("enum variant `{}`", path), - None => format!("`{}`", callee_ty), - }); + if let hir::ExprKind::Call(ref callee, _) = call_expr.node { + let mut err = type_error_struct!( + self.tcx.sess, + callee.span, + callee_ty, + E0618, + "expected function, found {}", + match unit_variant { + Some(ref path) => format!("enum variant `{}`", path), + None => format!("`{}`", callee_ty), + }); - err.span_label(call_expr.span, "not a function"); + if let Some(ref path) = unit_variant { + err.span_suggestion_with_applicability( + call_expr.span, + &format!("`{}` is a unit variant, you need to write it \ + without the parenthesis", path), + path.to_string(), + Applicability::MachineApplicable + ); + } - if let Some(ref path) = unit_variant { - err.span_suggestion_with_applicability( - call_expr.span, - &format!("`{}` is a unit variant, you need to write it \ - without the parenthesis", path), - path.to_string(), - Applicability::MachineApplicable - ); - } - - if let hir::ExprKind::Call(ref expr, _) = call_expr.node { - let def = if let hir::ExprKind::Path(ref qpath) = expr.node { - self.tables.borrow().qpath_def(qpath, expr.hir_id) - } else { - Def::Err + let mut inner_callee_path = None; + let def = match callee.node { + hir::ExprKind::Path(ref qpath) => { + self.tables.borrow().qpath_def(qpath, callee.hir_id) + }, + hir::ExprKind::Call(ref inner_callee, _) => { + // If the call spans more than one line and the callee kind is + // itself another `ExprCall`, that's a clue that we might just be + // missing a semicolon (Issue #51055) + let call_is_multiline = self.tcx.sess.source_map() + .is_multiline(call_expr.span); + if call_is_multiline { + let span = self.tcx.sess.source_map().next_point(callee.span); + err.span_suggestion_with_applicability( + span, + "try adding a 
semicolon", + ";".to_owned(), + Applicability::MaybeIncorrect + ); + } + if let hir::ExprKind::Path(ref inner_qpath) = inner_callee.node { + inner_callee_path = Some(inner_qpath); + self.tables.borrow().qpath_def(inner_qpath, inner_callee.hir_id) + } else { + Def::Err + } + }, + _ => { + Def::Err + } }; + + err.span_label(call_expr.span, "call expression requires function"); + let def_span = match def { Def::Err => None, Def::Local(id) | Def::Upvar(id, ..) => { @@ -255,16 +283,20 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { _ => self.tcx.hir.span_if_local(def.def_id()) }; if let Some(span) = def_span { - let name = match unit_variant { - Some(path) => path, - None => callee_ty.to_string(), + let label = match (unit_variant, inner_callee_path) { + (Some(path), _) => format!("`{}` defined here", path), + (_, Some(hir::QPath::Resolved(_, path))) => format!( + "`{}` defined here returns `{}`", path, callee_ty.to_string() + ), + _ => format!("`{}` defined here", callee_ty.to_string()), }; - err.span_label(span, format!("`{}` defined here", name)); + err.span_label(span, label); } + err.emit(); + } else { + bug!("call_expr.node should be an ExprKind::Call, got {:?}", call_expr.node); } - err.emit(); - // This is the "default" function signature, used in case of error. // In that case, we check each argument against "error" in order to // set up all the node type bindings. @@ -284,7 +316,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // previously appeared within a `Binder<>` and hence would not // have been normalized before. let fn_sig = - self.replace_late_bound_regions_with_fresh_var(call_expr.span, infer::FnCall, &fn_sig) + self.replace_bound_vars_with_fresh_vars(call_expr.span, infer::FnCall, &fn_sig) .0; let fn_sig = self.normalize_associated_types_in(call_expr.span, &fn_sig); diff --git a/src/librustc_typeck/check/cast.rs b/src/librustc_typeck/check/cast.rs index e0ee26cba0..c35aee7883 100644 --- a/src/librustc_typeck/check/cast.rs +++ b/src/librustc_typeck/check/cast.rs @@ -128,7 +128,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { ty::Opaque(def_id, substs) => Some(PointerKind::OfOpaque(def_id, substs)), ty::Param(ref p) => Some(PointerKind::OfParam(p)), // Insufficient type information. - ty::Infer(_) => None, + ty::Placeholder(..) | ty::Bound(..) | ty::Infer(_) => None, ty::Bool | ty::Char | ty::Int(..) | ty::Uint(..) | ty::Float(_) | ty::Array(..) | ty::GeneratorWitness(..) | diff --git a/src/librustc_typeck/check/closure.rs b/src/librustc_typeck/check/closure.rs index 3f4d187813..10ac2448d0 100644 --- a/src/librustc_typeck/check/closure.rs +++ b/src/librustc_typeck/check/closure.rs @@ -13,10 +13,12 @@ use super::{check_fn, Expectation, FnCtxt, GeneratorTypes}; use astconv::AstConv; +use middle::region; use rustc::hir::def_id::DefId; use rustc::infer::{InferOk, InferResult}; use rustc::infer::LateBoundRegionConversionTime; use rustc::infer::type_variable::TypeVariableOrigin; +use rustc::traits::Obligation; use rustc::traits::error_reporting::ArgKind; use rustc::ty::{self, ToPolyTraitRef, Ty, GenericParamDefKind}; use rustc::ty::fold::TypeFoldable; @@ -458,7 +460,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // Create a `PolyFnSig`. Note the oddity that late bound // regions appearing free in `expected_sig` are now bound up // in this binder we are creating. 
- assert!(!expected_sig.sig.has_regions_bound_above(ty::INNERMOST)); + assert!(!expected_sig.sig.has_vars_bound_above(ty::INNERMOST)); let bound_sig = ty::Binder::bind(self.tcx.mk_fn_sig( expected_sig.sig.inputs().iter().cloned(), expected_sig.sig.output(), @@ -479,7 +481,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // Along the way, it also writes out entries for types that the user // wrote into our tables, which are then later used by the privacy // check. - match self.check_supplied_sig_against_expectation(expr_def_id, decl, &closure_sigs) { + match self.check_supplied_sig_against_expectation(expr_def_id, decl, body, &closure_sigs) { Ok(infer_ok) => self.register_infer_ok_obligations(infer_ok), Err(_) => return self.sig_of_closure_no_expectation(expr_def_id, decl, body), } @@ -523,6 +525,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { &self, expr_def_id: DefId, decl: &hir::FnDecl, + body: &hir::Body, expected_sigs: &ClosureSignatures<'tcx>, ) -> InferResult<'tcx, ()> { // Get the signature S that the user gave. @@ -561,7 +564,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // `liberated_sig` is E'. { // Instantiate (this part of..) S to S', i.e., with fresh variables. - let (supplied_ty, _) = self.infcx.replace_late_bound_regions_with_fresh_var( + let (supplied_ty, _) = self.infcx.replace_bound_vars_with_fresh_vars( hir_ty.span, LateBoundRegionConversionTime::FnCall, &ty::Binder::bind(supplied_ty), @@ -575,9 +578,34 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } = self.at(cause, self.param_env) .eq(*expected_ty, supplied_ty)?; all_obligations.extend(obligations); + + // Also, require that the supplied type must outlive + // the closure body. + let closure_body_region = self.tcx.mk_region( + ty::ReScope( + region::Scope { + id: body.value.hir_id.local_id, + data: region::ScopeData::Node, + }, + ), + ); + all_obligations.push( + Obligation::new( + cause.clone(), + self.param_env, + ty::Predicate::TypeOutlives( + ty::Binder::dummy( + ty::OutlivesPredicate( + supplied_ty, + closure_body_region, + ), + ), + ), + ), + ); } - let (supplied_output_ty, _) = self.infcx.replace_late_bound_regions_with_fresh_var( + let (supplied_output_ty, _) = self.infcx.replace_bound_vars_with_fresh_vars( decl.output.span(), LateBoundRegionConversionTime::FnCall, &supplied_sig.output(), diff --git a/src/librustc_typeck/check/coercion.rs b/src/librustc_typeck/check/coercion.rs index 967c710ac3..3bdd038bff 100644 --- a/src/librustc_typeck/check/coercion.rs +++ b/src/librustc_typeck/check/coercion.rs @@ -61,7 +61,7 @@ //! we may want to adjust precisely when coercions occur. use check::{FnCtxt, Needs}; - +use errors::DiagnosticBuilder; use rustc::hir; use rustc::hir::def_id::DefId; use rustc::infer::{Coercion, InferResult, InferOk}; @@ -72,14 +72,12 @@ use rustc::ty::{self, TypeAndMut, Ty, ClosureSubsts}; use rustc::ty::fold::TypeFoldable; use rustc::ty::error::TypeError; use rustc::ty::relate::RelateResult; -use errors::DiagnosticBuilder; +use smallvec::{smallvec, SmallVec}; +use std::ops::Deref; use syntax::feature_gate; use syntax::ptr::P; use syntax_pos; -use std::collections::VecDeque; -use std::ops::Deref; - struct Coerce<'a, 'gcx: 'a + 'tcx, 'tcx: 'a> { fcx: &'a FnCtxt<'a, 'gcx, 'tcx>, cause: ObligationCause<'tcx>, @@ -536,18 +534,23 @@ impl<'f, 'gcx, 'tcx> Coerce<'f, 'gcx, 'tcx> { let mut selcx = traits::SelectionContext::new(self); - // Use a FIFO queue for this custom fulfillment procedure. (The maximum - // length is almost always 1.) 
- let mut queue = VecDeque::with_capacity(1); - // Create an obligation for `Source: CoerceUnsized`. let cause = ObligationCause::misc(self.cause.span, self.body_id); - queue.push_back(self.tcx.predicate_for_trait_def(self.fcx.param_env, - cause, - coerce_unsized_did, - 0, - coerce_source, - &[coerce_target.into()])); + + // Use a FIFO queue for this custom fulfillment procedure. + // + // A Vec (or SmallVec) is not a natural choice for a queue. However, + // this code path is hot, and this queue usually has a max length of 1 + // and almost never more than 3. By using a SmallVec we avoid an + // allocation, at the (very small) cost of (occasionally) having to + // shift subsequent elements down when removing the front element. + let mut queue: SmallVec<[_; 4]> = + smallvec![self.tcx.predicate_for_trait_def(self.fcx.param_env, + cause, + coerce_unsized_did, + 0, + coerce_source, + &[coerce_target.into()])]; let mut has_unsized_tuple_coercion = false; @@ -555,7 +558,8 @@ impl<'f, 'gcx, 'tcx> Coerce<'f, 'gcx, 'tcx> { // emitting a coercion in cases like `Foo<$1>` -> `Foo<$2>`, where // inference might unify those two inner type variables later. let traits = [coerce_unsized_did, unsize_did]; - while let Some(obligation) = queue.pop_front() { + while !queue.is_empty() { + let obligation = queue.remove(0); debug!("coerce_unsized resolve step: {:?}", obligation); let trait_ref = match obligation.predicate { ty::Predicate::Trait(ref tr) if traits.contains(&tr.def_id()) => { diff --git a/src/librustc_typeck/check/compare_method.rs b/src/librustc_typeck/check/compare_method.rs index 54c6c8f7b9..e30ebe07e5 100644 --- a/src/librustc_typeck/check/compare_method.rs +++ b/src/librustc_typeck/check/compare_method.rs @@ -233,7 +233,7 @@ fn compare_predicate_entailment<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let mut selcx = traits::SelectionContext::new(&infcx); let impl_m_own_bounds = impl_m_predicates.instantiate_own(tcx, impl_to_skol_substs); - let (impl_m_own_bounds, _) = infcx.replace_late_bound_regions_with_fresh_var( + let (impl_m_own_bounds, _) = infcx.replace_bound_vars_with_fresh_vars( impl_m_span, infer::HigherRankedType, &ty::Binder::bind(impl_m_own_bounds.predicates) @@ -262,10 +262,11 @@ fn compare_predicate_entailment<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, // Compute placeholder form of impl and trait method tys. 
let tcx = infcx.tcx; - let (impl_sig, _) = - infcx.replace_late_bound_regions_with_fresh_var(impl_m_span, - infer::HigherRankedType, - &tcx.fn_sig(impl_m.def_id)); + let (impl_sig, _) = infcx.replace_bound_vars_with_fresh_vars( + impl_m_span, + infer::HigherRankedType, + &tcx.fn_sig(impl_m.def_id) + ); let impl_sig = inh.normalize_associated_types_in(impl_m_span, impl_m_node_id, @@ -595,7 +596,9 @@ fn compare_number_of_generics<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, if num_impl_m_type_params != num_trait_m_type_params { let impl_m_node_id = tcx.hir.as_local_node_id(impl_m.def_id).unwrap(); let impl_m_item = tcx.hir.expect_impl_item(impl_m_node_id); - let span = if impl_m_item.generics.params.is_empty() { + let span = if impl_m_item.generics.params.is_empty() + || impl_m_item.generics.span.is_dummy() // impl Trait in argument position (#55374) + { impl_m_span } else { impl_m_item.generics.span diff --git a/src/librustc_typeck/check/demand.rs b/src/librustc_typeck/check/demand.rs index 7773e2d570..0a196834cb 100644 --- a/src/librustc_typeck/check/demand.rs +++ b/src/librustc_typeck/check/demand.rs @@ -132,7 +132,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { if compatible_variants.peek().is_some() { let expr_text = print::to_string(print::NO_ANN, |s| s.print_expr(expr)); let suggestions = compatible_variants - .map(|v| format!("{}({})", v, expr_text)).collect::>(); + .map(|v| format!("{}({})", v, expr_text)); err.span_suggestions_with_applicability( expr.span, "try using a variant of the expected type", diff --git a/src/librustc_typeck/check/intrinsic.rs b/src/librustc_typeck/check/intrinsic.rs index da96d4f0cb..2af21f5474 100644 --- a/src/librustc_typeck/check/intrinsic.rs +++ b/src/librustc_typeck/check/intrinsic.rs @@ -14,6 +14,7 @@ use intrinsics; use rustc::traits::{ObligationCause, ObligationCauseCode}; use rustc::ty::{self, TyCtxt, Ty}; +use rustc::ty::subst::Subst; use rustc::util::nodemap::FxHashMap; use require_same_types; @@ -81,6 +82,16 @@ pub fn check_intrinsic_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, it: &hir::ForeignItem) { let param = |n| tcx.mk_ty_param(n, Symbol::intern(&format!("P{}", n)).as_interned_str()); let name = it.name.as_str(); + + let mk_va_list_ty = || { + tcx.lang_items().va_list().map(|did| { + let region = tcx.mk_region(ty::ReLateBound(ty::INNERMOST, ty::BrAnon(0))); + let env_region = ty::ReLateBound(ty::INNERMOST, ty::BrEnv); + let va_list_ty = tcx.type_of(did).subst(tcx, &[region.into()]); + tcx.mk_mut_ref(tcx.mk_region(env_region), va_list_ty) + }) + }; + let (n_tps, inputs, output, unsafety) = if name.starts_with("atomic_") { let split : Vec<&str> = name.split('_').collect(); assert!(split.len() >= 2, "Atomic intrinsic in an incorrect format"); @@ -134,6 +145,7 @@ pub fn check_intrinsic_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, "rustc_peek" => (1, vec![param(0)], param(0)), "init" => (1, Vec::new(), param(0)), "uninit" => (1, Vec::new(), param(0)), + "forget" => (1, vec![param(0)], tcx.mk_unit()), "transmute" => (2, vec![ param(0) ], param(1)), "move_val_init" => { (1, @@ -292,7 +304,8 @@ pub fn check_intrinsic_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, "unchecked_div" | "unchecked_rem" | "exact_div" => (1, vec![param(0), param(0)], param(0)), - "unchecked_shl" | "unchecked_shr" => + "unchecked_shl" | "unchecked_shr" | + "rotate_left" | "rotate_right" => (1, vec![param(0), param(0)], param(0)), "overflowing_add" | "overflowing_sub" | "overflowing_mul" => @@ -321,6 +334,47 @@ pub fn check_intrinsic_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, (0, 
vec![tcx.mk_fn_ptr(fn_ty), mut_u8, mut_u8], tcx.types.i32) } + "va_start" | "va_end" => { + match mk_va_list_ty() { + Some(va_list_ty) => (0, vec![va_list_ty], tcx.mk_unit()), + None => bug!("va_list lang_item must be defined to use va_list intrinsics") + } + } + + "va_copy" => { + match tcx.lang_items().va_list() { + Some(did) => { + let region = tcx.mk_region(ty::ReLateBound(ty::INNERMOST, ty::BrAnon(0))); + let env_region = ty::ReLateBound(ty::INNERMOST, ty::BrEnv); + let va_list_ty = tcx.type_of(did).subst(tcx, &[region.into()]); + let ret_ty = match va_list_ty.sty { + ty::Adt(def, _) if def.is_struct() => { + let fields = &def.non_enum_variant().fields; + match tcx.type_of(fields[0].did).subst(tcx, &[region.into()]).sty { + ty::Ref(_, element_ty, _) => match element_ty.sty { + ty::Adt(..) => element_ty, + _ => va_list_ty + } + _ => bug!("va_list structure is invalid") + } + } + _ => { + bug!("va_list structure is invalid") + } + }; + (0, vec![tcx.mk_imm_ref(tcx.mk_region(env_region), va_list_ty)], ret_ty) + } + None => bug!("va_list lang_item must be defined to use va_list intrinsics") + } + } + + "va_arg" => { + match mk_va_list_ty() { + Some(va_list_ty) => (1, vec![va_list_ty], param(0)), + None => bug!("va_list lang_item must be defined to use va_list intrinsics") + } + } + "nontemporal_store" => { (1, vec![ tcx.mk_mut_ptr(param(0)), param(0) ], tcx.mk_unit()) } @@ -419,7 +473,7 @@ pub fn check_platform_intrinsic_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let mut structural_to_nomimal = FxHashMap::default(); let sig = tcx.fn_sig(def_id); - let sig = sig.no_late_bound_regions().unwrap(); + let sig = sig.no_bound_vars().unwrap(); if intr.inputs.len() != sig.inputs().len() { span_err!(tcx.sess, it.span, E0444, "platform-specific intrinsic has invalid number of \ diff --git a/src/librustc_typeck/check/method/confirm.rs b/src/librustc_typeck/check/method/confirm.rs index 75f5bf74c6..5144f3e41d 100644 --- a/src/librustc_typeck/check/method/confirm.rs +++ b/src/librustc_typeck/check/method/confirm.rs @@ -245,7 +245,7 @@ impl<'a, 'gcx, 'tcx> ConfirmContext<'a, 'gcx, 'tcx> { let original_poly_trait_ref = principal.with_self_ty(this.tcx, object_ty); let upcast_poly_trait_ref = this.upcast(original_poly_trait_ref, trait_def_id); let upcast_trait_ref = - this.replace_late_bound_regions_with_fresh_var(&upcast_poly_trait_ref); + this.replace_bound_vars_with_fresh_vars(&upcast_poly_trait_ref); debug!("original_poly_trait_ref={:?} upcast_trait_ref={:?} target_trait={:?}", original_poly_trait_ref, upcast_trait_ref, @@ -268,7 +268,7 @@ impl<'a, 'gcx, 'tcx> ConfirmContext<'a, 'gcx, 'tcx> { probe::WhereClausePick(ref poly_trait_ref) => { // Where clauses can have bound regions in them. We need to instantiate // those to convert from a poly-trait-ref to a trait-ref. - self.replace_late_bound_regions_with_fresh_var(&poly_trait_ref).substs + self.replace_bound_vars_with_fresh_vars(&poly_trait_ref).substs } } } @@ -398,7 +398,7 @@ impl<'a, 'gcx, 'tcx> ConfirmContext<'a, 'gcx, 'tcx> { // NB: Instantiate late-bound regions first so that // `instantiate_type_scheme` can normalize associated types that // may reference those regions. 
- let method_sig = self.replace_late_bound_regions_with_fresh_var(&sig); + let method_sig = self.replace_bound_vars_with_fresh_vars(&sig); debug!("late-bound lifetimes from method instantiated, method_sig={:?}", method_sig); @@ -633,11 +633,9 @@ impl<'a, 'gcx, 'tcx> ConfirmContext<'a, 'gcx, 'tcx> { upcast_trait_refs.into_iter().next().unwrap() } - fn replace_late_bound_regions_with_fresh_var<T>(&self, value: &ty::Binder<T>) -> T + fn replace_bound_vars_with_fresh_vars<T>(&self, value: &ty::Binder<T>) -> T where T: TypeFoldable<'tcx> { - self.fcx - .replace_late_bound_regions_with_fresh_var(self.span, infer::FnCall, value) - .0 + self.fcx.replace_bound_vars_with_fresh_vars(self.span, infer::FnCall, value).0 } } diff --git a/src/librustc_typeck/check/method/mod.rs b/src/librustc_typeck/check/method/mod.rs index 04c32fa882..37f4ae5677 100644 --- a/src/librustc_typeck/check/method/mod.rs +++ b/src/librustc_typeck/check/method/mod.rs @@ -10,7 +10,7 @@ //! Method lookup: the secret sauce of Rust. See the [rustc guide] chapter. //! -//! [rustc guide]: https://rust-lang-nursery.github.io/rustc-guide/method-lookup.html +//! [rustc guide]: https://rust-lang.github.io/rustc-guide/method-lookup.html use check::FnCtxt; use hir::def::Def; @@ -289,8 +289,14 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // Trait must have a method named `m_name` and it should not have // type parameters or early-bound regions. let tcx = self.tcx; - let method_item = - self.associated_item(trait_def_id, m_name, Namespace::Value).unwrap(); + let method_item = match self.associated_item(trait_def_id, m_name, Namespace::Value) { + Some(method_item) => method_item, + None => { + tcx.sess.delay_span_bug(span, + "operator trait does not have corresponding operator method"); + return None; + } + }; let def_id = method_item.def_id; let generics = tcx.generics_of(def_id); assert_eq!(generics.params.len(), 0); @@ -305,9 +311,11 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // `instantiate_type_scheme` can normalize associated types that // may reference those regions. let fn_sig = tcx.fn_sig(def_id); - let fn_sig = self.replace_late_bound_regions_with_fresh_var(span, - infer::FnCall, - &fn_sig).0; + let fn_sig = self.replace_bound_vars_with_fresh_vars( + span, + infer::FnCall, + &fn_sig + ).0; let fn_sig = fn_sig.subst(self.tcx, substs); let fn_sig = match self.normalize_associated_types_in_as_infer_ok(span, &fn_sig) { InferOk { value, obligations: o } => { @@ -331,7 +339,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { value } }; - assert!(!bounds.has_escaping_regions()); + assert!(!bounds.has_escaping_bound_vars()); let cause = traits::ObligationCause::misc(span, self.body_id); obligations.extend(traits::predicates_for_generics(cause.clone(), diff --git a/src/librustc_typeck/check/method/probe.rs b/src/librustc_typeck/check/method/probe.rs index c506f23078..5b67116cb5 100644 --- a/src/librustc_typeck/check/method/probe.rs +++ b/src/librustc_typeck/check/method/probe.rs @@ -31,6 +31,7 @@ use rustc::middle::stability; use syntax::ast; use syntax::util::lev_distance::{lev_distance, find_best_match_for_name}; use syntax_pos::{Span, symbol::Symbol}; +use std::iter; use std::mem; use std::ops::Deref; use std::rc::Rc; @@ -627,7 +628,7 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { // itself. Hence, a `&self` method will wind up with an // argument type like `&Trait`.
let trait_ref = principal.with_self_ty(self.tcx, self_ty); - self.elaborate_bounds(&[trait_ref], |this, new_trait_ref, item| { + self.elaborate_bounds(iter::once(trait_ref), |this, new_trait_ref, item| { let new_trait_ref = this.erase_late_bound_regions(&new_trait_ref); let (xform_self_ty, xform_ret_ty) = @@ -645,7 +646,7 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { param_ty: ty::ParamTy) { // FIXME -- Do we want to commit to this behavior for param bounds? - let bounds: Vec<_> = self.param_env + let bounds = self.param_env .caller_bounds .iter() .filter_map(|predicate| { @@ -667,10 +668,9 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { ty::Predicate::TypeOutlives(..) | ty::Predicate::ConstEvaluatable(..) => None, } - }) - .collect(); + }); - self.elaborate_bounds(&bounds, |this, poly_trait_ref, item| { + self.elaborate_bounds(bounds, |this, poly_trait_ref, item| { let trait_ref = this.erase_late_bound_regions(&poly_trait_ref); let (xform_self_ty, xform_ret_ty) = @@ -693,15 +693,16 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { // Do a search through a list of bounds, using a callback to actually // create the candidates. - fn elaborate_bounds(&mut self, bounds: &[ty::PolyTraitRef<'tcx>], mut mk_cand: F) + fn elaborate_bounds(&mut self, + bounds: impl Iterator>, + mut mk_cand: F) where F: for<'b> FnMut(&mut ProbeContext<'b, 'gcx, 'tcx>, ty::PolyTraitRef<'tcx>, ty::AssociatedItem) { - debug!("elaborate_bounds(bounds={:?})", bounds); - let tcx = self.tcx; for bound_trait_ref in traits::transitive_bounds(tcx, bounds) { + debug!("elaborate_bounds(bound_trait_ref={:?})", bound_trait_ref); for item in self.impl_or_trait_item(bound_trait_ref.def_id()) { if !self.has_applicable_self(&item) { self.record_static_candidate(TraitSource(bound_trait_ref.def_id())); @@ -754,8 +755,11 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { self.probe(|_| { let substs = self.fresh_substs_for_item(self.span, method.def_id); let fty = fty.subst(self.tcx, substs); - let (fty, _) = self.replace_late_bound_regions_with_fresh_var( - self.span, infer::FnCall, &fty); + let (fty, _) = self.replace_bound_vars_with_fresh_vars( + self.span, + infer::FnCall, + &fty + ); if let Some(self_ty) = self_ty { if self.at(&ObligationCause::dummy(), self.param_env) @@ -831,7 +835,7 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { } let static_candidates = mem::replace(&mut self.static_candidates, vec![]); - let private_candidate = mem::replace(&mut self.private_candidate, None); + let private_candidate = self.private_candidate.take(); let unsatisfied_predicates = mem::replace(&mut self.unsatisfied_predicates, vec![]); // things failed, so lets look at all traits, for diagnostic purposes now: @@ -1373,7 +1377,7 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { fn_sig, substs); - assert!(!substs.has_escaping_regions()); + assert!(!substs.has_escaping_bound_vars()); // It is possible for type parameters or early-bound lifetimes // to appear in the signature of `self`. The substitutions we @@ -1436,7 +1440,7 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { /// /// 1. Because the numbers of the region variables would otherwise be fairly unique to this /// particular method call, it winds up creating fewer types overall, which helps for memory - /// usage. (Admittedly, this is a rather small effect, though measureable.) + /// usage. (Admittedly, this is a rather small effect, though measurable.) /// /// 2. 
It makes it easier to deal with higher-ranked trait bounds, because we can replace any /// late-bound regions with 'static. Otherwise, if we were going to replace late-bound @@ -1490,7 +1494,7 @@ impl<'tcx> Candidate<'tcx> { // `WhereClausePick`. assert!( !trait_ref.skip_binder().substs.needs_infer() - && !trait_ref.skip_binder().substs.has_skol() + && !trait_ref.skip_binder().substs.has_placeholders() ); WhereClausePick(trait_ref.clone()) diff --git a/src/librustc_typeck/check/method/suggest.rs b/src/librustc_typeck/check/method/suggest.rs index 5a63a2971e..cd243d4144 100644 --- a/src/librustc_typeck/check/method/suggest.rs +++ b/src/librustc_typeck/check/method/suggest.rs @@ -521,7 +521,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { with_crate_prefix(|| self.tcx.item_path_str(*did)), additional_newline ) - }).collect(); + }); err.span_suggestions_with_applicability( span, diff --git a/src/librustc_typeck/check/mod.rs b/src/librustc_typeck/check/mod.rs index 85c69d50a1..e30a79b25d 100644 --- a/src/librustc_typeck/check/mod.rs +++ b/src/librustc_typeck/check/mod.rs @@ -103,6 +103,8 @@ use rustc::ty::adjustment::{Adjust, Adjustment, AllowTwoPhase, AutoBorrow, AutoB use rustc::ty::fold::TypeFoldable; use rustc::ty::query::Providers; use rustc::ty::util::{Representability, IntTypeExt, Discr}; +use rustc::ty::layout::VariantIdx; +use rustc_data_structures::indexed_vec::Idx; use errors::{Applicability, DiagnosticBuilder, DiagnosticId}; use require_c_abi_if_variadic; @@ -206,6 +208,10 @@ pub struct Inherited<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { fulfillment_cx: RefCell<Box<dyn TraitEngine<'tcx>>>, + // Some additional `Sized` obligations badly affect type inference. + // These obligations are added in a later stage of typeck. + deferred_sized_obligations: RefCell<Vec<(Ty<'tcx>, Span, traits::ObligationCauseCode<'tcx>)>>, + // When we process a call like `c()` where `c` is a closure type, // we may not have decided yet whether `c` is a `Fn`, `FnMut`, or // `FnOnce` closure. In that case, we defer full resolution of the @@ -548,7 +554,7 @@ pub struct FnCtxt<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { /// current expression. As each subpart is processed, they may set /// the flag to `Always` etc. Finally, at the end, we take the /// result and "union" it with the original value, so that when we - /// return the flag indicates if any subpart of the the parent + /// return the flag indicates if any subpart of the parent /// expression (up to and including this part) has diverged.
So, /// if you read it after evaluating a subexpression `X`, the value /// you get indicates whether any subexpression that was @@ -641,11 +647,12 @@ impl<'a, 'gcx, 'tcx> Inherited<'a, 'gcx, 'tcx> { }, infcx, fulfillment_cx: RefCell::new(TraitEngine::new(tcx)), - locals: RefCell::new(NodeMap()), - deferred_call_resolutions: RefCell::new(DefIdMap()), + locals: RefCell::new(Default::default()), + deferred_sized_obligations: RefCell::new(Vec::new()), + deferred_call_resolutions: RefCell::new(Default::default()), deferred_cast_checks: RefCell::new(Vec::new()), deferred_generator_interiors: RefCell::new(Vec::new()), - opaque_types: RefCell::new(DefIdMap()), + opaque_types: RefCell::new(Default::default()), implicit_region_bound, body_id, } @@ -653,8 +660,8 @@ impl<'a, 'gcx, 'tcx> Inherited<'a, 'gcx, 'tcx> { fn register_predicate(&self, obligation: traits::PredicateObligation<'tcx>) { debug!("register_predicate({:?})", obligation); - if obligation.has_escaping_regions() { - span_bug!(obligation.cause.span, "escaping regions in predicate {:?}", + if obligation.has_escaping_bound_vars() { + span_bug!(obligation.cause.span, "escaping bound vars in predicate {:?}", obligation); } self.fulfillment_cx @@ -905,6 +912,10 @@ fn typeck_tables_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, fcx.closure_analyze(body); assert!(fcx.deferred_call_resolutions.borrow().is_empty()); fcx.resolve_generator_interiors(def_id); + + for (ty, span, code) in fcx.deferred_sized_obligations.borrow_mut().drain(..) { + fcx.require_type_is_sized(ty, span, code); + } fcx.select_all_obligations_or_error(); if fn_decl.is_some() { @@ -976,7 +987,7 @@ impl<'a, 'gcx, 'tcx> Visitor<'gcx> for GatherLocalsVisitor<'a, 'gcx, 'tcx> { o_ty }; - let c_ty = self.fcx.inh.infcx.canonicalize_response(&revealed_ty); + let c_ty = self.fcx.inh.infcx.canonicalize_user_type_annotation(&revealed_ty); debug!("visit_local: ty.hir_id={:?} o_ty={:?} revealed_ty={:?} c_ty={:?}", ty.hir_id, o_ty, revealed_ty, c_ty); self.fcx.tables.borrow_mut().user_provided_tys_mut().insert(ty.hir_id, c_ty); @@ -1167,7 +1178,7 @@ fn check_fn<'a, 'gcx, 'tcx>(inherited: &'a Inherited<'a, 'gcx, 'tcx>, } } - // Check that a function marked as `#[panic_implementation]` has signature `fn(&PanicInfo) -> !` + // Check that a function marked as `#[panic_handler]` has signature `fn(&PanicInfo) -> !` if let Some(panic_impl_did) = fcx.tcx.lang_items().panic_impl() { if panic_impl_did == fcx.tcx.hir.local_def_id(fn_id) { if let Some(panic_info_did) = fcx.tcx.lang_items().panic_info() { @@ -1713,7 +1724,7 @@ fn check_packed<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, sp: Span, def_id: DefId) let repr = tcx.adt_def(def_id).repr; if repr.packed() { for attr in tcx.get_attrs(def_id).iter() { - for r in attr::find_repr_attrs(tcx.sess.diagnostic(), attr) { + for r in attr::find_repr_attrs(&tcx.sess.parse_sess, attr) { if let attr::ReprPacked(pack) = r { if pack != repr.pack { struct_span_err!(tcx.sess, sp, E0634, @@ -1777,7 +1788,7 @@ fn check_transparent<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, sp: Span, def_id: De // We are currently checking the type this field came from, so it must be local let span = tcx.hir.span_if_local(field.did).unwrap(); let zst = layout.map(|layout| layout.is_zst()).unwrap_or(false); - let align1 = layout.map(|layout| layout.align.abi() == 1).unwrap_or(false); + let align1 = layout.map(|layout| layout.align.abi.bytes() == 1).unwrap_or(false); (span, zst, align1) }); @@ -1837,10 +1848,11 @@ pub fn check_enum<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, } let mut disr_vals: Vec> = 
Vec::with_capacity(vs.len()); - for (discr, v) in def.discriminants(tcx).zip(vs) { + for ((_, discr), v) in def.discriminants(tcx).zip(vs) { // Check for duplicate discriminant values if let Some(i) = disr_vals.iter().position(|&x| x.val == discr.val) { - let variant_i_node_id = tcx.hir.as_local_node_id(def.variants[i].did).unwrap(); + let variant_did = def.variants[VariantIdx::new(i)].did; + let variant_i_node_id = tcx.hir.as_local_node_id(variant_did).unwrap(); let variant_i = tcx.hir.expect_variant(variant_i_node_id); let i_span = match variant_i.node.disr_expr { Some(ref expr) => tcx.hir.span(expr.id), @@ -1866,7 +1878,7 @@ impl<'a, 'gcx, 'tcx> AstConv<'gcx, 'tcx> for FnCtxt<'a, 'gcx, 'tcx> { fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'tcx> { self.tcx } fn get_type_parameter_bounds(&self, _: Span, def_id: DefId) - -> ty::GenericPredicates<'tcx> + -> Lrc> { let tcx = self.tcx; let node_id = tcx.hir.as_local_node_id(def_id).unwrap(); @@ -1874,7 +1886,7 @@ impl<'a, 'gcx, 'tcx> AstConv<'gcx, 'tcx> for FnCtxt<'a, 'gcx, 'tcx> { let item_def_id = tcx.hir.local_def_id(item_id); let generics = tcx.generics_of(item_def_id); let index = generics.param_def_id_to_index[&def_id]; - ty::GenericPredicates { + Lrc::new(ty::GenericPredicates { parent: None, predicates: self.param_env.caller_bounds.iter().filter_map(|&predicate| { match predicate { @@ -1887,7 +1899,7 @@ impl<'a, 'gcx, 'tcx> AstConv<'gcx, 'tcx> for FnCtxt<'a, 'gcx, 'tcx> { _ => None } }).collect() - } + }) } fn re_infer(&self, span: Span, def: Option<&ty::GenericParamDef>) @@ -1918,17 +1930,17 @@ impl<'a, 'gcx, 'tcx> AstConv<'gcx, 'tcx> for FnCtxt<'a, 'gcx, 'tcx> { poly_trait_ref: ty::PolyTraitRef<'tcx>) -> Ty<'tcx> { - let (trait_ref, _) = - self.replace_late_bound_regions_with_fresh_var( - span, - infer::LateBoundRegionConversionTime::AssocTypeProjection(item_def_id), - &poly_trait_ref); + let (trait_ref, _) = self.replace_bound_vars_with_fresh_vars( + span, + infer::LateBoundRegionConversionTime::AssocTypeProjection(item_def_id), + &poly_trait_ref + ); self.tcx().mk_projection(item_def_id, trait_ref.substs) } fn normalize_ty(&self, span: Span, ty: Ty<'tcx>) -> Ty<'tcx> { - if ty.has_escaping_regions() { + if ty.has_escaping_bound_vars() { ty // FIXME: normalization and escaping regions } else { self.normalize_associated_types_in(span, &ty) @@ -1983,7 +1995,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { has_errors: Cell::new(false), enclosing_breakables: RefCell::new(EnclosingBreakables { stack: Vec::new(), - by_id: NodeMap(), + by_id: Default::default(), }), inh, } @@ -2137,7 +2149,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { method.substs[i] } }); - self.infcx.canonicalize_response(&UserSubsts { + self.infcx.canonicalize_user_type_annotation(&UserSubsts { substs: just_method_substs, user_self_ty: None, // not relevant here }) @@ -2181,7 +2193,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { ); if !substs.is_noop() { - let user_substs = self.infcx.canonicalize_response(&UserSubsts { + let user_substs = self.infcx.canonicalize_user_type_annotation(&UserSubsts { substs, user_self_ty, }); @@ -2342,6 +2354,14 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { self.require_type_meets(ty, span, code, lang_item); } + pub fn require_type_is_sized_deferred(&self, + ty: Ty<'tcx>, + span: Span, + code: traits::ObligationCauseCode<'tcx>) + { + self.deferred_sized_obligations.borrow_mut().push((ty, span, code)); + } + pub fn register_bound(&self, ty: Ty<'tcx>, def_id: DefId, @@ -2431,7 +2451,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> 
{ cause: traits::ObligationCause<'tcx>, predicates: &ty::InstantiatedPredicates<'tcx>) { - assert!(!predicates.has_escaping_regions()); + assert!(!predicates.has_escaping_bound_vars()); debug!("add_obligations_for_parameters(predicates={:?})", predicates); @@ -3510,10 +3530,9 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { _ => span_bug!(span, "non-ADT passed to check_expr_struct_fields") }; - let mut remaining_fields = FxHashMap::default(); - for (i, field) in variant.fields.iter().enumerate() { - remaining_fields.insert(field.ident.modern(), (i, field)); - } + let mut remaining_fields = variant.fields.iter().enumerate().map(|(i, field)| + (field.ident.modern(), (i, field)) + ).collect::<FxHashMap<_, _>>(); let mut seen_fields = FxHashMap::default(); @@ -3937,6 +3956,31 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { tcx.types.err }; + if let ty::FnDef(..) = ty.sty { + let fn_sig = ty.fn_sig(tcx); + if !tcx.features().unsized_locals { + // We want to remove some Sized bounds from std functions, + // but don't want to expose the removal to stable Rust. + // i.e. we don't want to allow + // + // ```rust + // drop as fn(str); + // ``` + // + // to work in stable even if the Sized bound on `drop` is relaxed. + for i in 0..fn_sig.inputs().skip_binder().len() { + let input = tcx.erase_late_bound_regions(&fn_sig.input(i)); + self.require_type_is_sized_deferred(input, expr.span, + traits::SizedArgumentType); + } + } + // Here we want to prevent struct constructors from returning unsized types. + // There were two cases where this happened: fn pointer coercion in stable + // and usual function call in the presence of unsized_locals. + let output = tcx.erase_late_bound_regions(&fn_sig.output()); + self.require_type_is_sized_deferred(output, expr.span, traits::SizedReturnType); + } + // We always require that the type provided as the value for // a type parameter outlives the moment of instantiation. let substs = self.tables.borrow().node_substs(expr.hir_id); @@ -4238,13 +4282,6 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { }; let count = tcx.const_eval(param_env.and(global_id)); - if let Err(ref err) = count { - err.report_as_error( - tcx.at(tcx.def_span(count_def_id)), - "could not evaluate repeat length", - ); - } - let uty = match expected { ExpectHasType(uty) => { match uty.sty { @@ -4752,7 +4789,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } else if !self.check_for_cast(err, expr, found, expected) { let methods = self.get_conversion_methods(expr.span, expected, found); if let Ok(expr_text) = self.sess().source_map().span_to_snippet(expr.span) { - let suggestions = iter::repeat(&expr_text).zip(methods.iter()) + let mut suggestions = iter::repeat(&expr_text).zip(methods.iter()) .filter_map(|(receiver, method)| { let method_call = format!(".{}()", method.ident); if receiver.ends_with(&method_call) { @@ -4768,8 +4805,8 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { Some(format!("{}{}", receiver, method_call)) } } - }).collect::<Vec<_>>(); - if !suggestions.is_empty() { + }).peekable(); + if suggestions.peek().is_some() { err.span_suggestions_with_applicability( expr.span, "try using a conversion method", @@ -5058,10 +5095,8 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // provided (if any) into their appropriate spaces. We'll also report // errors if type parameters are provided in an inappropriate place.
- let mut generic_segs = FxHashSet::default(); - for PathSeg(_, index) in &path_segs { - generic_segs.insert(index); - } + let generic_segs = path_segs.iter().map(|PathSeg(_, index)| index) + .collect::>(); AstConv::prohibit_generics(self, segments.iter().enumerate().filter_map(|(index, seg)| { if !generic_segs.contains(&index) { Some(seg) @@ -5198,8 +5233,8 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } }, ); - assert!(!substs.has_escaping_regions()); - assert!(!ty.has_escaping_regions()); + assert!(!substs.has_escaping_bound_vars()); + assert!(!ty.has_escaping_bound_vars()); // Write the "user substs" down first thing for later. let hir_id = self.tcx.hir.node_to_hir_id(node_id); diff --git a/src/librustc_typeck/check/regionck.rs b/src/librustc_typeck/check/regionck.rs index 80b4ba6240..212ee2698e 100644 --- a/src/librustc_typeck/check/regionck.rs +++ b/src/librustc_typeck/check/regionck.rs @@ -1243,6 +1243,7 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { | Categorization::StaticItem | Categorization::Upvar(..) | Categorization::Local(..) + | Categorization::ThreadLocal(..) | Categorization::Rvalue(..) => { // These are all "base cases" with independent lifetimes // that are not subject to inference diff --git a/src/librustc_typeck/check/upvar.rs b/src/librustc_typeck/check/upvar.rs index 99effce4ee..312ce40277 100644 --- a/src/librustc_typeck/check/upvar.rs +++ b/src/librustc_typeck/check/upvar.rs @@ -45,14 +45,14 @@ use super::FnCtxt; use middle::expr_use_visitor as euv; use middle::mem_categorization as mc; use middle::mem_categorization::Categorization; -use rustc::hir::def_id::DefId; -use rustc::ty::{self, Ty, TyCtxt, UpvarSubsts}; -use rustc::infer::UpvarRegion; -use syntax::ast; -use syntax_pos::Span; use rustc::hir; +use rustc::hir::def_id::DefId; use rustc::hir::def_id::LocalDefId; use rustc::hir::intravisit::{self, NestedVisitorMap, Visitor}; +use rustc::infer::UpvarRegion; +use rustc::ty::{self, Ty, TyCtxt, UpvarSubsts}; +use syntax::ast; +use syntax_pos::Span; impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { pub fn closure_analyze(&self, body: &'gcx hir::Body) { @@ -121,7 +121,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } }; - let infer_kind = if let UpvarSubsts::Closure(closure_substs) = substs{ + let infer_kind = if let UpvarSubsts::Closure(closure_substs) = substs { if self.closure_kind(closure_def_id, closure_substs).is_none() { Some(closure_substs) } else { @@ -134,7 +134,9 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { self.tcx.with_freevars(closure_node_id, |freevars| { for freevar in freevars { let upvar_id = ty::UpvarId { - var_id: self.tcx.hir.node_to_hir_id(freevar.var_id()), + var_path: ty::UpvarPath { + hir_id : self.tcx.hir.node_to_hir_id(freevar.var_id()), + }, closure_expr_id: LocalDefId::from_def_id(closure_def_id), }; debug!("seed upvar_id {:?}", upvar_id); @@ -213,12 +215,11 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { let final_upvar_tys = self.final_upvar_tys(closure_node_id); debug!( "analyze_closure: id={:?} substs={:?} final_upvar_tys={:?}", - closure_node_id, - substs, - final_upvar_tys + closure_node_id, substs, final_upvar_tys ); - for (upvar_ty, final_upvar_ty) in substs.upvar_tys(closure_def_id, self.tcx) - .zip(final_upvar_tys) + for (upvar_ty, final_upvar_ty) in substs + .upvar_tys(closure_def_id, self.tcx) + .zip(final_upvar_tys) { self.demand_suptype(span, upvar_ty, final_upvar_ty); } @@ -249,16 +250,16 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { let var_hir_id = tcx.hir.node_to_hir_id(var_node_id); let freevar_ty = 
self.node_ty(var_hir_id); let upvar_id = ty::UpvarId { - var_id: var_hir_id, + var_path: ty::UpvarPath { + hir_id: var_hir_id, + }, closure_expr_id: LocalDefId::from_def_id(closure_def_index), }; let capture = self.tables.borrow().upvar_capture(upvar_id); debug!( "var_id={:?} freevar_ty={:?} capture={:?}", - var_node_id, - freevar_ty, - capture + var_node_id, freevar_ty, capture ); match capture { @@ -271,8 +272,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { }, ), } - }) - .collect() + }).collect() }) } } @@ -301,12 +301,14 @@ struct InferBorrowKind<'a, 'gcx: 'a + 'tcx, 'tcx: 'a> { } impl<'a, 'gcx, 'tcx> InferBorrowKind<'a, 'gcx, 'tcx> { - fn adjust_upvar_borrow_kind_for_consume(&mut self, cmt: &mc::cmt_<'tcx>, - mode: euv::ConsumeMode) { + fn adjust_upvar_borrow_kind_for_consume( + &mut self, + cmt: &mc::cmt_<'tcx>, + mode: euv::ConsumeMode, + ) { debug!( "adjust_upvar_borrow_kind_for_consume(cmt={:?}, mode={:?})", - cmt, - mode + cmt, mode ); // we only care about moves @@ -349,7 +351,7 @@ impl<'a, 'gcx, 'tcx> InferBorrowKind<'a, 'gcx, 'tcx> { upvar_id.closure_expr_id, ty::ClosureKind::FnOnce, guarantor.span, - var_name(tcx, upvar_id.var_id), + var_name(tcx, upvar_id.var_path.hir_id), ); self.adjust_upvar_captures @@ -366,7 +368,7 @@ impl<'a, 'gcx, 'tcx> InferBorrowKind<'a, 'gcx, 'tcx> { upvar_id.closure_expr_id, ty::ClosureKind::FnOnce, guarantor.span, - var_name(tcx, upvar_id.var_id), + var_name(tcx, upvar_id.var_path.hir_id), ); } mc::NoteIndex | mc::NoteNone => {} @@ -381,9 +383,9 @@ impl<'a, 'gcx, 'tcx> InferBorrowKind<'a, 'gcx, 'tcx> { debug!("adjust_upvar_borrow_kind_for_mut(cmt={:?})", cmt); match cmt.cat.clone() { - Categorization::Deref(base, mc::Unique) | - Categorization::Interior(base, _) | - Categorization::Downcast(base, _) => { + Categorization::Deref(base, mc::Unique) + | Categorization::Interior(base, _) + | Categorization::Downcast(base, _) => { // Interior or owned data is mutable if base is // mutable, so iterate to the base. self.adjust_upvar_borrow_kind_for_mut(&base); @@ -399,11 +401,12 @@ impl<'a, 'gcx, 'tcx> InferBorrowKind<'a, 'gcx, 'tcx> { } } - Categorization::Deref(_, mc::UnsafePtr(..)) | - Categorization::StaticItem | - Categorization::Rvalue(..) | - Categorization::Local(_) | - Categorization::Upvar(..) => { + Categorization::Deref(_, mc::UnsafePtr(..)) + | Categorization::StaticItem + | Categorization::ThreadLocal(..) + | Categorization::Rvalue(..) + | Categorization::Local(_) + | Categorization::Upvar(..) => { return; } } @@ -413,9 +416,9 @@ impl<'a, 'gcx, 'tcx> InferBorrowKind<'a, 'gcx, 'tcx> { debug!("adjust_upvar_borrow_kind_for_unique(cmt={:?})", cmt); match cmt.cat.clone() { - Categorization::Deref(base, mc::Unique) | - Categorization::Interior(base, _) | - Categorization::Downcast(base, _) => { + Categorization::Deref(base, mc::Unique) + | Categorization::Interior(base, _) + | Categorization::Downcast(base, _) => { // Interior or owned data is unique if base is // unique. self.adjust_upvar_borrow_kind_for_unique(&base); @@ -429,17 +432,20 @@ impl<'a, 'gcx, 'tcx> InferBorrowKind<'a, 'gcx, 'tcx> { } } - Categorization::Deref(_, mc::UnsafePtr(..)) | - Categorization::StaticItem | - Categorization::Rvalue(..) | - Categorization::Local(_) | - Categorization::Upvar(..) => {} + Categorization::Deref(_, mc::UnsafePtr(..)) + | Categorization::StaticItem + | Categorization::ThreadLocal(..) + | Categorization::Rvalue(..) + | Categorization::Local(_) + | Categorization::Upvar(..) 
=> {} } } - fn try_adjust_upvar_deref(&mut self, cmt: &mc::cmt_<'tcx>, borrow_kind: ty::BorrowKind) - -> bool - { + fn try_adjust_upvar_deref( + &mut self, + cmt: &mc::cmt_<'tcx>, + borrow_kind: ty::BorrowKind, + ) -> bool { assert!(match borrow_kind { ty::MutBorrow => true, ty::UniqueImmBorrow => true, @@ -463,7 +469,7 @@ impl<'a, 'gcx, 'tcx> InferBorrowKind<'a, 'gcx, 'tcx> { upvar_id.closure_expr_id, ty::ClosureKind::FnMut, cmt.span, - var_name(tcx, upvar_id.var_id), + var_name(tcx, upvar_id.var_path.hir_id), ); true @@ -476,7 +482,7 @@ impl<'a, 'gcx, 'tcx> InferBorrowKind<'a, 'gcx, 'tcx> { upvar_id.closure_expr_id, ty::ClosureKind::FnMut, cmt.span, - var_name(tcx, upvar_id.var_id), + var_name(tcx, upvar_id.var_path.hir_id), ); true @@ -491,15 +497,14 @@ impl<'a, 'gcx, 'tcx> InferBorrowKind<'a, 'gcx, 'tcx> { /// Here the argument `mutbl` is the borrow_kind that is required by /// some particular use. fn adjust_upvar_borrow_kind(&mut self, upvar_id: ty::UpvarId, kind: ty::BorrowKind) { - let upvar_capture = self.adjust_upvar_captures + let upvar_capture = self + .adjust_upvar_captures .get(&upvar_id) .cloned() .unwrap_or_else(|| self.fcx.tables.borrow().upvar_capture(upvar_id)); debug!( "adjust_upvar_borrow_kind(upvar_id={:?}, upvar_capture={:?}, kind={:?})", - upvar_id, - upvar_capture, - kind + upvar_id, upvar_capture, kind ); match upvar_capture { @@ -509,18 +514,18 @@ impl<'a, 'gcx, 'tcx> InferBorrowKind<'a, 'gcx, 'tcx> { ty::UpvarCapture::ByRef(mut upvar_borrow) => { match (upvar_borrow.kind, kind) { // Take RHS: - (ty::ImmBorrow, ty::UniqueImmBorrow) | - (ty::ImmBorrow, ty::MutBorrow) | - (ty::UniqueImmBorrow, ty::MutBorrow) => { + (ty::ImmBorrow, ty::UniqueImmBorrow) + | (ty::ImmBorrow, ty::MutBorrow) + | (ty::UniqueImmBorrow, ty::MutBorrow) => { upvar_borrow.kind = kind; self.adjust_upvar_captures .insert(upvar_id, ty::UpvarCapture::ByRef(upvar_borrow)); } // Take LHS: - (ty::ImmBorrow, ty::ImmBorrow) | - (ty::UniqueImmBorrow, ty::ImmBorrow) | - (ty::UniqueImmBorrow, ty::UniqueImmBorrow) | - (ty::MutBorrow, _) => {} + (ty::ImmBorrow, ty::ImmBorrow) + | (ty::UniqueImmBorrow, ty::ImmBorrow) + | (ty::UniqueImmBorrow, ty::UniqueImmBorrow) + | (ty::MutBorrow, _) => {} } } } @@ -535,10 +540,7 @@ impl<'a, 'gcx, 'tcx> InferBorrowKind<'a, 'gcx, 'tcx> { ) { debug!( "adjust_closure_kind(closure_id={:?}, new_kind={:?}, upvar_span={:?}, var_name={})", - closure_id, - new_kind, - upvar_span, - var_name + closure_id, new_kind, upvar_span, var_name ); // Is this the closure whose kind is currently being inferred? 
@@ -552,22 +554,20 @@ impl<'a, 'gcx, 'tcx> InferBorrowKind<'a, 'gcx, 'tcx> { debug!( "adjust_closure_kind: closure_id={:?}, existing_kind={:?}, new_kind={:?}", - closure_id, - existing_kind, - new_kind + closure_id, existing_kind, new_kind ); match (existing_kind, new_kind) { - (ty::ClosureKind::Fn, ty::ClosureKind::Fn) | - (ty::ClosureKind::FnMut, ty::ClosureKind::Fn) | - (ty::ClosureKind::FnMut, ty::ClosureKind::FnMut) | - (ty::ClosureKind::FnOnce, _) => { + (ty::ClosureKind::Fn, ty::ClosureKind::Fn) + | (ty::ClosureKind::FnMut, ty::ClosureKind::Fn) + | (ty::ClosureKind::FnMut, ty::ClosureKind::FnMut) + | (ty::ClosureKind::FnOnce, _) => { // no change needed } - (ty::ClosureKind::Fn, ty::ClosureKind::FnMut) | - (ty::ClosureKind::Fn, ty::ClosureKind::FnOnce) | - (ty::ClosureKind::FnMut, ty::ClosureKind::FnOnce) => { + (ty::ClosureKind::Fn, ty::ClosureKind::FnMut) + | (ty::ClosureKind::Fn, ty::ClosureKind::FnOnce) + | (ty::ClosureKind::FnMut, ty::ClosureKind::FnOnce) => { // new kind is stronger than the old kind self.current_closure_kind = new_kind; self.current_origin = Some((upvar_span, var_name)); @@ -588,12 +588,20 @@ impl<'a, 'gcx, 'tcx> euv::Delegate<'tcx> for InferBorrowKind<'a, 'gcx, 'tcx> { self.adjust_upvar_borrow_kind_for_consume(cmt, mode); } - fn matched_pat(&mut self, _matched_pat: &hir::Pat, _cmt: &mc::cmt_<'tcx>, - _mode: euv::MatchMode) { + fn matched_pat( + &mut self, + _matched_pat: &hir::Pat, + _cmt: &mc::cmt_<'tcx>, + _mode: euv::MatchMode, + ) { } - fn consume_pat(&mut self, _consume_pat: &hir::Pat, cmt: &mc::cmt_<'tcx>, - mode: euv::ConsumeMode) { + fn consume_pat( + &mut self, + _consume_pat: &hir::Pat, + cmt: &mc::cmt_<'tcx>, + mode: euv::ConsumeMode, + ) { debug!("consume_pat(cmt={:?},mode={:?})", cmt, mode); self.adjust_upvar_borrow_kind_for_consume(cmt, mode); } @@ -609,9 +617,7 @@ impl<'a, 'gcx, 'tcx> euv::Delegate<'tcx> for InferBorrowKind<'a, 'gcx, 'tcx> { ) { debug!( "borrow(borrow_id={}, cmt={:?}, bk={:?})", - borrow_id, - cmt, - bk + borrow_id, cmt, bk ); match bk { diff --git a/src/librustc_typeck/check/wfcheck.rs b/src/librustc_typeck/check/wfcheck.rs index 75207f18ab..e5fe74f230 100644 --- a/src/librustc_typeck/check/wfcheck.rs +++ b/src/librustc_typeck/check/wfcheck.rs @@ -13,7 +13,7 @@ use constrained_type_params::{identify_constrained_type_params, Parameter}; use hir::def_id::DefId; use rustc::traits::{self, ObligationCauseCode}; -use rustc::ty::{self, Lift, Ty, TyCtxt, GenericParamDefKind, TypeFoldable}; +use rustc::ty::{self, Lift, Ty, TyCtxt, TyKind, GenericParamDefKind, TypeFoldable}; use rustc::ty::subst::{Subst, Substs}; use rustc::ty::util::ExplicitSelf; use rustc::util::nodemap::{FxHashSet, FxHashMap}; @@ -28,9 +28,9 @@ use errors::{DiagnosticBuilder, DiagnosticId}; use rustc::hir::intravisit::{self, Visitor, NestedVisitorMap}; use rustc::hir; -/// Helper type of a temporary returned by .for_item(...). +/// Helper type of a temporary returned by `.for_item(...)`. /// Necessary because we can't write the following bound: -/// F: for<'b, 'tcx> where 'gcx: 'tcx FnOnce(FnCtxt<'b, 'gcx, 'tcx>). +/// `F: for<'b, 'tcx> where 'gcx: 'tcx FnOnce(FnCtxt<'b, 'gcx, 'tcx>)`. struct CheckWfFcxBuilder<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { inherited: super::InheritedBuilder<'a, 'gcx, 'tcx>, id: ast::NodeId, @@ -119,14 +119,14 @@ pub fn check_item_well_formed<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: Def check_item_fn(tcx, item); } hir::ItemKind::Static(ref ty, ..) 
=> { - check_item_type(tcx, item.id, ty.span); + check_item_type(tcx, item.id, ty.span, false); } hir::ItemKind::Const(ref ty, ..) => { - check_item_type(tcx, item.id, ty.span); + check_item_type(tcx, item.id, ty.span, false); } hir::ItemKind::ForeignMod(ref module) => for it in module.items.iter() { if let hir::ForeignItemKind::Static(ref ty, ..) = it.node { - check_item_type(tcx, it.id, ty.span); + check_item_type(tcx, it.id, ty.span, true); } }, hir::ItemKind::Struct(ref struct_def, ref ast_generics) => { @@ -153,6 +153,9 @@ pub fn check_item_well_formed<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: Def hir::ItemKind::Trait(..) => { check_trait(tcx, item); } + hir::ItemKind::TraitAlias(..) => { + check_trait(tcx, item); + } _ => {} } } @@ -183,6 +186,8 @@ fn check_associated_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, item_id: ast::NodeId, span: Span, sig_if_method: Option<&hir::MethodSig>) { + debug!("check_associated_item: {:?}", item_id); + let code = ObligationCauseCode::MiscObligation; for_id(tcx, item_id, span).with_fcx(|fcx, tcx| { let item = fcx.tcx.associated_item(fcx.tcx.hir.local_def_id(item_id)); @@ -308,6 +313,8 @@ fn check_type_defn<'a, 'tcx, F>(tcx: TyCtxt<'a, 'tcx, 'tcx>, } fn check_trait<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, item: &hir::Item) { + debug!("check_trait: {:?}", item.id); + let trait_def_id = tcx.hir.local_def_id(item.id); let trait_def = tcx.trait_def(trait_def_id); @@ -340,23 +347,33 @@ fn check_item_fn<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, item: &hir::Item) { }) } -fn check_item_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, item_id: ast::NodeId, ty_span: Span) { +fn check_item_type<'a, 'tcx>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + item_id: ast::NodeId, + ty_span: Span, + allow_foreign_ty: bool, +) { debug!("check_item_type: {:?}", item_id); for_id(tcx, item_id, ty_span).with_fcx(|fcx, gcx| { let ty = gcx.type_of(gcx.hir.local_def_id(item_id)); let item_ty = fcx.normalize_associated_types_in(ty_span, &ty); + let mut forbid_unsized = true; + if allow_foreign_ty { + if let TyKind::Foreign(_) = fcx.tcx.struct_tail(item_ty).sty { + forbid_unsized = false; + } + } + fcx.register_wf_obligation(item_ty, ty_span, ObligationCauseCode::MiscObligation); - fcx.register_bound( - item_ty, - fcx.tcx.require_lang_item(lang_items::SizedTraitLangItem), - traits::ObligationCause::new( - ty_span, - fcx.body_id, - traits::MiscObligation, - ), - ); + if forbid_unsized { + fcx.register_bound( + item_ty, + fcx.tcx.require_lang_item(lang_items::SizedTraitLangItem), + traits::ObligationCause::new(ty_span, fcx.body_id, traits::MiscObligation), + ); + } vec![] // no implied bounds in a const etc }); @@ -893,8 +910,8 @@ fn check_false_global_bounds<'a, 'gcx, 'tcx>( let def_id = fcx.tcx.hir.local_def_id(id); let predicates = fcx.tcx.predicates_of(def_id).predicates - .into_iter() - .map(|(p, _)| p) + .iter() + .map(|(p, _)| *p) .collect(); // Check elaborated bounds let implied_obligations = traits::elaborate_predicates(fcx.tcx, predicates); @@ -980,7 +997,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { AdtField { ty: field_ty, span: field.span } }) .collect(); - AdtVariant { fields: fields } + AdtVariant { fields } } fn enum_variants(&self, enum_def: &hir::EnumDef) -> Vec> { @@ -999,7 +1016,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } None => { - // Inherent impl: take implied bounds from the self type. + // Inherent impl: take implied bounds from the `self` type. 
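The new `allow_foreign_ty` flag threaded through `check_item_type` above exempts foreign statics from the usual `Sized` obligation when the tail of their type is an `extern type`, which is opaque and unsized by design. A hedged sketch of the kind of declaration this is meant to accept (requires the nightly `extern_types` feature; the item names are invented for illustration):

```
#![feature(extern_types)]
#![allow(dead_code)]

extern "C" {
    // An opaque foreign type: Rust never learns its size or alignment.
    type OpaqueHandle;

    // A foreign static of that unsized type. Without the `allow_foreign_ty`
    // exemption added above, the `Sized` bound that `check_item_type`
    // registers for statics and consts would reject this declaration.
    static GLOBAL_HANDLE: OpaqueHandle;
}

fn main() {
    // The declarations alone are enough to exercise the new check; actually
    // reading `GLOBAL_HANDLE` would additionally require linking a C object
    // that defines the symbol.
}
```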
let self_ty = self.tcx.type_of(impl_def_id); let self_ty = self.normalize_associated_types_in(span, &self_ty); vec![self_ty] diff --git a/src/librustc_typeck/check/writeback.rs b/src/librustc_typeck/check/writeback.rs index d968bf222a..669f2bcb77 100644 --- a/src/librustc_typeck/check/writeback.rs +++ b/src/librustc_typeck/check/writeback.rs @@ -56,7 +56,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { let used_trait_imports = mem::replace( &mut self.tables.borrow_mut().used_trait_imports, - Lrc::new(DefIdSet()), + Lrc::new(DefIdSet::default()), ); debug!( "used_trait_imports({:?}) = {:?}", @@ -115,7 +115,7 @@ impl<'cx, 'gcx, 'tcx> WritebackCx<'cx, 'gcx, 'tcx> { fn write_ty_to_tables(&mut self, hir_id: hir::HirId, ty: Ty<'gcx>) { debug!("write_ty_to_tables({:?}, {:?})", hir_id, ty); - assert!(!ty.needs_infer() && !ty.has_skol()); + assert!(!ty.needs_infer() && !ty.has_placeholders()); self.tables.node_types_mut().insert(hir_id, ty); } @@ -306,7 +306,7 @@ impl<'cx, 'gcx, 'tcx> WritebackCx<'cx, 'gcx, 'tcx> { ty::UpvarCapture::ByValue => ty::UpvarCapture::ByValue, ty::UpvarCapture::ByRef(ref upvar_borrow) => { let r = upvar_borrow.region; - let r = self.resolve(&r, &upvar_id.var_id); + let r = self.resolve(&r, &upvar_id.var_path.hir_id); ty::UpvarCapture::ByRef(ty::UpvarBorrow { kind: upvar_borrow.kind, region: r, @@ -580,7 +580,7 @@ impl<'cx, 'gcx, 'tcx> WritebackCx<'cx, 'gcx, 'tcx> { if let Some(substs) = self.fcx.tables.borrow().node_substs_opt(hir_id) { let substs = self.resolve(&substs, &span); debug!("write_substs_to_tcx({:?}, {:?})", hir_id, substs); - assert!(!substs.needs_infer() && !substs.has_skol()); + assert!(!substs.needs_infer() && !substs.has_placeholders()); self.tables.node_substs_mut().insert(hir_id, substs); } diff --git a/src/librustc_typeck/check_unused.rs b/src/librustc_typeck/check_unused.rs index 9d785dfb58..9c1860fb82 100644 --- a/src/librustc_typeck/check_unused.rs +++ b/src/librustc_typeck/check_unused.rs @@ -24,7 +24,7 @@ use rustc::util::nodemap::DefIdSet; use rustc_data_structures::fx::FxHashMap; pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { - let mut used_trait_imports = DefIdSet(); + let mut used_trait_imports = DefIdSet::default(); for &body_id in tcx.hir.krate().bodies.keys() { let item_def_id = tcx.hir.body_owner_def_id(body_id); let imports = tcx.used_trait_imports(item_def_id); @@ -113,11 +113,12 @@ fn unused_crates_lint<'tcx>(tcx: TyCtxt<'_, 'tcx, 'tcx>) { true }) .filter(|&&(def_id, _)| { - let cnum = tcx.extern_mod_stmt_cnum(def_id).unwrap(); - !tcx.is_compiler_builtins(cnum) - && !tcx.is_panic_runtime(cnum) - && !tcx.has_global_allocator(cnum) - && !tcx.has_panic_handler(cnum) + tcx.extern_mod_stmt_cnum(def_id).map_or(true, |cnum| { + !tcx.is_compiler_builtins(cnum) && + !tcx.is_panic_runtime(cnum) && + !tcx.has_global_allocator(cnum) && + !tcx.has_panic_handler(cnum) + }) }) .cloned() .collect(); @@ -185,7 +186,7 @@ fn unused_crates_lint<'tcx>(tcx: TyCtxt<'_, 'tcx, 'tcx>) { Some(orig_name) => format!("use {} as {};", orig_name, item.name), None => format!("use {};", item.name), }; - let replacement = visibility_qualified(&item.vis, &base_replacement); + let replacement = visibility_qualified(&item.vis, base_replacement); tcx.struct_span_lint_node(lint, id, extern_crate.span, msg) .span_suggestion_short_with_applicability( extern_crate.span, diff --git a/src/librustc_typeck/coherence/builtin.rs b/src/librustc_typeck/coherence/builtin.rs index 05a83dd307..a5ad31e0b6 100644 --- a/src/librustc_typeck/coherence/builtin.rs +++ 
b/src/librustc_typeck/coherence/builtin.rs @@ -31,8 +31,9 @@ pub fn check_trait<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, trait_def_id: DefId) { Checker { tcx, trait_def_id } .check(tcx.lang_items().drop_trait(), visit_implementation_of_drop) .check(tcx.lang_items().copy_trait(), visit_implementation_of_copy) - .check(tcx.lang_items().coerce_unsized_trait(), - visit_implementation_of_coerce_unsized); + .check(tcx.lang_items().coerce_unsized_trait(), visit_implementation_of_coerce_unsized) + .check(tcx.lang_items().dispatch_from_dyn_trait(), + visit_implementation_of_dispatch_from_dyn); } struct Checker<'a, 'tcx: 'a> { @@ -98,7 +99,7 @@ fn visit_implementation_of_copy<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, impl_did: let span = tcx.hir.span(impl_node_id); let param_env = tcx.param_env(impl_did); - assert!(!self_type.has_escaping_regions()); + assert!(!self_type.has_escaping_bound_vars()); debug!("visit_implementation_of_copy: self_type={:?} (free)", self_type); @@ -162,6 +163,174 @@ fn visit_implementation_of_coerce_unsized<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, } } +fn visit_implementation_of_dispatch_from_dyn<'a, 'tcx>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + impl_did: DefId, +) { + debug!("visit_implementation_of_dispatch_from_dyn: impl_did={:?}", + impl_did); + if impl_did.is_local() { + let dispatch_from_dyn_trait = tcx.lang_items().dispatch_from_dyn_trait().unwrap(); + + let impl_node_id = tcx.hir.as_local_node_id(impl_did).unwrap(); + let span = tcx.hir.span(impl_node_id); + + let source = tcx.type_of(impl_did); + assert!(!source.has_escaping_bound_vars()); + let target = { + let trait_ref = tcx.impl_trait_ref(impl_did).unwrap(); + assert_eq!(trait_ref.def_id, dispatch_from_dyn_trait); + + trait_ref.substs.type_at(1) + }; + + debug!("visit_implementation_of_dispatch_from_dyn: {:?} -> {:?}", + source, + target); + + let param_env = tcx.param_env(impl_did); + + let create_err = |msg: &str| { + struct_span_err!(tcx.sess, span, E0378, "{}", msg) + }; + + tcx.infer_ctxt().enter(|infcx| { + let cause = ObligationCause::misc(span, impl_node_id); + + use ty::TyKind::*; + match (&source.sty, &target.sty) { + (&Ref(r_a, _, mutbl_a), Ref(r_b, _, mutbl_b)) + if infcx.at(&cause, param_env).eq(r_a, r_b).is_ok() + && mutbl_a == *mutbl_b => (), + (&RawPtr(tm_a), &RawPtr(tm_b)) + if tm_a.mutbl == tm_b.mutbl => (), + (&Adt(def_a, substs_a), &Adt(def_b, substs_b)) + if def_a.is_struct() && def_b.is_struct() => + { + if def_a != def_b { + let source_path = tcx.item_path_str(def_a.did); + let target_path = tcx.item_path_str(def_b.did); + + create_err( + &format!( + "the trait `DispatchFromDyn` may only be implemented \ + for a coercion between structures with the same \ + definition; expected `{}`, found `{}`", + source_path, target_path, + ) + ).emit(); + + return + } + + if def_a.repr.c() || def_a.repr.packed() { + create_err( + "structs implementing `DispatchFromDyn` may not have \ + `#[repr(packed)]` or `#[repr(C)]`" + ).emit(); + } + + let fields = &def_a.non_enum_variant().fields; + + let coerced_fields = fields.iter().filter_map(|field| { + if tcx.type_of(field.did).is_phantom_data() { + // ignore PhantomData fields + return None + } + + let ty_a = field.ty(tcx, substs_a); + let ty_b = field.ty(tcx, substs_b); + if let Ok(ok) = infcx.at(&cause, param_env).eq(ty_a, ty_b) { + if ok.obligations.is_empty() { + create_err( + "the trait `DispatchFromDyn` may only be implemented \ + for structs containing the field being coerced, \ + `PhantomData` fields, and nothing else" + ).note( + &format!( + "extra field `{}` 
of type `{}` is not allowed", + field.ident, ty_a, + ) + ).emit(); + + return None; + } + } + + Some(field) + }).collect::>(); + + if coerced_fields.is_empty() { + create_err( + "the trait `DispatchFromDyn` may only be implemented \ + for a coercion between structures with a single field \ + being coerced, none found" + ).emit(); + } else if coerced_fields.len() > 1 { + create_err( + "implementing the `DispatchFromDyn` trait requires multiple coercions", + ).note( + "the trait `DispatchFromDyn` may only be implemented \ + for a coercion between structures with a single field \ + being coerced" + ).note( + &format!( + "currently, {} fields need coercions: {}", + coerced_fields.len(), + coerced_fields.iter().map(|field| { + format!("`{}` (`{}` to `{}`)", + field.ident, + field.ty(tcx, substs_a), + field.ty(tcx, substs_b), + ) + }).collect::>() + .join(", ") + ) + ).emit(); + } else { + let mut fulfill_cx = TraitEngine::new(infcx.tcx); + + for field in coerced_fields { + + let predicate = tcx.predicate_for_trait_def( + param_env, + cause.clone(), + dispatch_from_dyn_trait, + 0, + field.ty(tcx, substs_a), + &[field.ty(tcx, substs_b).into()] + ); + + fulfill_cx.register_predicate_obligation(&infcx, predicate); + } + + // Check that all transitive obligations are satisfied. + if let Err(errors) = fulfill_cx.select_all_or_error(&infcx) { + infcx.report_fulfillment_errors(&errors, None, false); + } + + // Finally, resolve all regions. + let region_scope_tree = region::ScopeTree::default(); + let outlives_env = OutlivesEnvironment::new(param_env); + infcx.resolve_regions_and_report_errors( + impl_did, + ®ion_scope_tree, + &outlives_env, + SuppressRegionErrors::default(), + ); + } + } + _ => { + create_err( + "the trait `DispatchFromDyn` may only be implemented \ + for a coercion between structures" + ).emit(); + } + } + }) + } +} + pub fn coerce_unsized_info<'a, 'gcx>(gcx: TyCtxt<'a, 'gcx, 'gcx>, impl_did: DefId) -> CoerceUnsizedInfo { @@ -187,7 +356,7 @@ pub fn coerce_unsized_info<'a, 'gcx>(gcx: TyCtxt<'a, 'gcx, 'gcx>, let span = gcx.hir.span(impl_node_id); let param_env = gcx.param_env(impl_did); - assert!(!source.has_escaping_regions()); + assert!(!source.has_escaping_bound_vars()); let err_info = CoerceUnsizedInfo { custom_kind: None }; @@ -236,7 +405,7 @@ pub fn coerce_unsized_info<'a, 'gcx>(gcx: TyCtxt<'a, 'gcx, 'gcx>, E0377, "the trait `CoerceUnsized` may only be implemented \ for a coercion between structures with the same \ - definition; expected {}, found {}", + definition; expected `{}`, found `{}`", source_path, target_path); return err_info; @@ -341,7 +510,7 @@ pub fn coerce_unsized_info<'a, 'gcx>(gcx: TyCtxt<'a, 'gcx, 'gcx>, diff_fields.len(), diff_fields.iter() .map(|&(i, a, b)| { - format!("{} ({} to {})", fields[i].ident, a, b) + format!("`{}` (`{}` to `{}`)", fields[i].ident, a, b) }) .collect::>() .join(", "))); diff --git a/src/librustc_typeck/coherence/inherent_impls.rs b/src/librustc_typeck/coherence/inherent_impls.rs index ec979dea4f..d9dff14412 100644 --- a/src/librustc_typeck/coherence/inherent_impls.rs +++ b/src/librustc_typeck/coherence/inherent_impls.rs @@ -22,7 +22,6 @@ use rustc::hir::def_id::{CrateNum, DefId, LOCAL_CRATE}; use rustc::hir; use rustc::hir::itemlikevisit::ItemLikeVisitor; use rustc::ty::{self, CrateInherentImpls, TyCtxt}; -use rustc::util::nodemap::DefIdMap; use rustc_data_structures::sync::Lrc; use syntax::ast; @@ -31,18 +30,16 @@ use syntax_pos::Span; /// On-demand query: yields a map containing all types mapped to their inherent impls. 
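The new `visit_implementation_of_dispatch_from_dyn` above accepts an impl only when source and target are references or raw pointers of matching mutability, or two instances of the same struct in which exactly one non-`PhantomData` field changes type and that field itself satisfies `DispatchFromDyn`. A hedged sketch of an impl shape the checker is written to accept, for a hypothetical smart pointer (nightly features required; `MyPtr` is invented for illustration and is not part of this patch):

```
#![feature(dispatch_from_dyn, unsize)]
#![allow(dead_code)]

use std::marker::{PhantomData, Unsize};
use std::ops::DispatchFromDyn;

// A thin wrapper around a raw pointer plus a zero-sized marker field.
struct MyPtr<T: ?Sized> {
    raw: *const T,
    _marker: PhantomData<T>,
}

// Accepted by the new checker: going from `MyPtr<T>` to `MyPtr<U>` coerces
// exactly one non-`PhantomData` field (`raw`), and raw pointers already
// implement `DispatchFromDyn` for unsizing coercions such as `T` -> `dyn Trait`.
impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<MyPtr<U>> for MyPtr<T> {}

fn main() {}
```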
pub fn crate_inherent_impls<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, crate_num: CrateNum) - -> CrateInherentImpls { + -> Lrc { assert_eq!(crate_num, LOCAL_CRATE); let krate = tcx.hir.krate(); let mut collect = InherentCollect { tcx, - impls_map: CrateInherentImpls { - inherent_impls: DefIdMap() - } + impls_map: Default::default(), }; krate.visit_all_item_likes(&mut collect); - collect.impls_map + Lrc::new(collect.impls_map) } /// On-demand query: yields a vector of the inherent impls for a specific type. diff --git a/src/librustc_typeck/coherence/orphan.rs b/src/librustc_typeck/coherence/orphan.rs index b155587ddd..14c6864434 100644 --- a/src/librustc_typeck/coherence/orphan.rs +++ b/src/librustc_typeck/coherence/orphan.rs @@ -17,7 +17,7 @@ use rustc::hir::itemlikevisit::ItemLikeVisitor; use rustc::hir; pub fn check<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { - let mut orphan = OrphanChecker { tcx: tcx }; + let mut orphan = OrphanChecker { tcx }; tcx.hir.krate().visit_all_item_likes(&mut orphan); } diff --git a/src/librustc_typeck/coherence/unsafety.rs b/src/librustc_typeck/coherence/unsafety.rs index bdbf93ddec..0894c1d49e 100644 --- a/src/librustc_typeck/coherence/unsafety.rs +++ b/src/librustc_typeck/coherence/unsafety.rs @@ -16,7 +16,7 @@ use rustc::hir::itemlikevisit::ItemLikeVisitor; use rustc::hir::{self, Unsafety}; pub fn check<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { - let mut unsafety = UnsafetyChecker { tcx: tcx }; + let mut unsafety = UnsafetyChecker { tcx }; tcx.hir.krate().visit_all_item_likes(&mut unsafety); } diff --git a/src/librustc_typeck/collect.rs b/src/librustc_typeck/collect.rs index b33b21478c..a1bb0b53f1 100644 --- a/src/librustc_typeck/collect.rs +++ b/src/librustc_typeck/collect.rs @@ -39,6 +39,7 @@ use rustc::ty::{self, AdtKind, ToPolyTraitRef, Ty, TyCtxt}; use rustc::ty::{ReprOptions, ToPredicate}; use rustc::util::captures::Captures; use rustc::util::nodemap::FxHashMap; +use rustc_data_structures::sync::Lrc; use rustc_target::spec::abi; use syntax::ast; @@ -58,11 +59,13 @@ use rustc::hir::{self, CodegenFnAttrFlags, CodegenFnAttrs, Unsafety}; use std::iter; +struct OnlySelfBounds(bool); + /////////////////////////////////////////////////////////////////////////// // Main entry point pub fn collect_item_types<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { - let mut visitor = CollectItemTypesVisitor { tcx: tcx }; + let mut visitor = CollectItemTypesVisitor { tcx }; tcx.hir .krate() .visit_all_item_likes(&mut visitor.as_deep_visitor()); @@ -176,7 +179,8 @@ impl<'a, 'tcx> AstConv<'tcx, 'tcx> for ItemCtxt<'a, 'tcx> { self.tcx } - fn get_type_parameter_bounds(&self, span: Span, def_id: DefId) -> ty::GenericPredicates<'tcx> { + fn get_type_parameter_bounds(&self, span: Span, def_id: DefId) + -> Lrc> { self.tcx .at(span) .type_param_predicates((self.item_def_id, def_id)) @@ -208,7 +212,7 @@ impl<'a, 'tcx> AstConv<'tcx, 'tcx> for ItemCtxt<'a, 'tcx> { item_def_id: DefId, poly_trait_ref: ty::PolyTraitRef<'tcx>, ) -> Ty<'tcx> { - if let Some(trait_ref) = poly_trait_ref.no_late_bound_regions() { + if let Some(trait_ref) = poly_trait_ref.no_bound_vars() { self.tcx().mk_projection(item_def_id, trait_ref.substs) } else { // no late-bound regions, we can just ignore the binder @@ -241,12 +245,12 @@ impl<'a, 'tcx> AstConv<'tcx, 'tcx> for ItemCtxt<'a, 'tcx> { fn type_param_predicates<'a, 'tcx>( tcx: TyCtxt<'a, 'tcx, 'tcx>, (item_def_id, def_id): (DefId, DefId), -) -> ty::GenericPredicates<'tcx> { +) -> Lrc> { use rustc::hir::*; // In the AST, bounds can derive from two places. 
Either - // written inline like `` or in a where clause like - // `where T:Foo`. + // written inline like `` or in a where clause like + // `where T : Foo`. let param_id = tcx.hir.as_local_node_id(def_id).unwrap(); let param_owner = tcx.hir.ty_param_owner(param_id); @@ -262,11 +266,11 @@ fn type_param_predicates<'a, 'tcx>( tcx.generics_of(item_def_id).parent }; - let mut result = parent.map_or( - ty::GenericPredicates { + let mut result = parent.map_or_else( + || Lrc::new(ty::GenericPredicates { parent: None, predicates: vec![], - }, + }), |parent| { let icx = ItemCtxt::new(tcx, parent); icx.get_type_parameter_bounds(DUMMY_SP, def_id) @@ -296,7 +300,7 @@ fn type_param_predicates<'a, 'tcx>( // Implied `Self: Trait` and supertrait bounds. if param_id == item_node_id { let identity_trait_ref = ty::TraitRef::identity(tcx, item_def_id); - result + Lrc::make_mut(&mut result) .predicates .push((identity_trait_ref.to_predicate(), item.span)); } @@ -315,14 +319,15 @@ fn type_param_predicates<'a, 'tcx>( }; let icx = ItemCtxt::new(tcx, item_def_id); - result + Lrc::make_mut(&mut result) .predicates - .extend(icx.type_parameter_bounds_in_generics(ast_generics, param_id, ty)); + .extend(icx.type_parameter_bounds_in_generics(ast_generics, param_id, ty, + OnlySelfBounds(true))); result } impl<'a, 'tcx> ItemCtxt<'a, 'tcx> { - /// Find bounds from hir::Generics. This requires scanning through the + /// Find bounds from `hir::Generics`. This requires scanning through the /// AST. We do this to avoid having to convert *all* the bounds, which /// would create artificial cycles. Instead we can only convert the /// bounds for a type parameter `X` if `X::Foo` is used. @@ -331,6 +336,7 @@ impl<'a, 'tcx> ItemCtxt<'a, 'tcx> { ast_generics: &hir::Generics, param_id: ast::NodeId, ty: Ty<'tcx>, + only_self_bounds: OnlySelfBounds, ) -> Vec<(ty::Predicate<'tcx>, Span)> { let from_ty_params = ast_generics .params @@ -350,9 +356,17 @@ impl<'a, 'tcx> ItemCtxt<'a, 'tcx> { hir::WherePredicate::BoundPredicate(ref bp) => Some(bp), _ => None, }) - .filter(|bp| is_param(self.tcx, &bp.bounded_ty, param_id)) - .flat_map(|bp| bp.bounds.iter()) - .flat_map(|b| predicates_from_bound(self, ty, b)); + .flat_map(|bp| { + let bt = if is_param(self.tcx, &bp.bounded_ty, param_id) { + Some(ty) + } else if !only_self_bounds.0 { + Some(self.to_ty(&bp.bounded_ty)) + } else { + None + }; + bp.bounds.iter().filter_map(move |b| bt.map(|bt| (bt, b))) + }) + .flat_map(|(bt, b)| predicates_from_bound(self, bt, b)); from_ty_params.chain(from_where_clauses).collect() } @@ -419,12 +433,9 @@ fn convert_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, item_id: ast::NodeId) { tcx.predicates_of(def_id); } hir::ItemKind::TraitAlias(..) 
=> { - span_err!( - tcx.sess, - it.span, - E0645, - "trait aliases are not yet implemented (see issue #41517)" - ); + tcx.generics_of(def_id); + tcx.at(it.span).super_predicates_of(def_id); + tcx.predicates_of(def_id); } hir::ItemKind::Struct(ref struct_def, _) | hir::ItemKind::Union(ref struct_def, _) => { tcx.generics_of(def_id); @@ -642,7 +653,7 @@ fn adt_def<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> &'tcx ty::Ad }; ( AdtKind::Struct, - vec![convert_variant( + std::iter::once(convert_variant( tcx, ctor_id.unwrap_or(def_id), item.name, @@ -650,12 +661,12 @@ fn adt_def<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> &'tcx ty::Ad def, AdtKind::Struct, def_id - )], + )).collect(), ) } ItemKind::Union(ref def, _) => ( AdtKind::Union, - vec![convert_variant( + std::iter::once(convert_variant( tcx, def_id, item.name, @@ -663,7 +674,7 @@ fn adt_def<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> &'tcx ty::Ad def, AdtKind::Union, def_id - )], + )).collect(), ), _ => bug!(), }; @@ -676,7 +687,7 @@ fn adt_def<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> &'tcx ty::Ad fn super_predicates_of<'a, 'tcx>( tcx: TyCtxt<'a, 'tcx, 'tcx>, trait_def_id: DefId, -) -> ty::GenericPredicates<'tcx> { +) -> Lrc> { debug!("super_predicates(trait_def_id={:?})", trait_def_id); let trait_node_id = tcx.hir.as_local_node_id(trait_def_id).unwrap(); @@ -693,15 +704,20 @@ fn super_predicates_of<'a, 'tcx>( let icx = ItemCtxt::new(tcx, trait_def_id); - // Convert the bounds that follow the colon, e.g. `Bar+Zed` in `trait Foo : Bar+Zed`. + // Convert the bounds that follow the colon, e.g. `Bar + Zed` in `trait Foo : Bar + Zed`. let self_param_ty = tcx.mk_self_type(); let superbounds1 = compute_bounds(&icx, self_param_ty, bounds, SizedByDefault::No, item.span); let superbounds1 = superbounds1.predicates(tcx, self_param_ty); // Convert any explicit superbounds in the where clause, - // e.g. `trait Foo where Self : Bar`: - let superbounds2 = icx.type_parameter_bounds_in_generics(generics, item.id, self_param_ty); + // e.g. `trait Foo where Self : Bar`. + // In the case of trait aliases, however, we include all bounds in the where clause, + // so e.g. `trait Foo = where u32: PartialEq` would include `u32: PartialEq` + // as one of its "superpredicates". + let is_trait_alias = ty::is_trait_alias(tcx, trait_def_id); + let superbounds2 = icx.type_parameter_bounds_in_generics( + generics, item.id, self_param_ty, OnlySelfBounds(!is_trait_alias)); // Combine the two lists to form the complete set of superbounds: let superbounds: Vec<_> = superbounds1.into_iter().chain(superbounds2).collect(); @@ -709,15 +725,16 @@ fn super_predicates_of<'a, 'tcx>( // Now require that immediate supertraits are converted, // which will, in turn, reach indirect supertraits. 
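With the `ItemKind::TraitAlias` arms above, trait aliases are no longer rejected with E0645 ("not yet implemented"); their generics, superpredicates, and predicates are now collected like those of an ordinary trait, and every bound in the alias's where clause becomes a superpredicate. A hedged sketch of the surface syntax this collection feeds (nightly `trait_alias` feature; the alias names are invented):

```
#![feature(trait_alias)]

use std::fmt::Debug;

// A simple alias: `Printable` stands for `Debug + Send`.
trait Printable = Debug + Send;

// The where-clause form mentioned in the comment above: `T: Debug` becomes a
// "superpredicate" of the alias, just as an explicit bound would.
trait DebugIter<T> = Iterator<Item = T> where T: Debug;

fn show<P: Printable>(value: &P) {
    println!("{:?}", value);
}

fn dump<I: DebugIter<u32>>(iter: I) {
    for item in iter {
        println!("{:?}", item);
    }
}

fn main() {
    show(&42u8);
    dump(0u32..5);
}
```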
for &(pred, span) in &superbounds { + debug!("superbound: {:?}", pred); if let ty::Predicate::Trait(bound) = pred { tcx.at(span).super_predicates_of(bound.def_id()); } } - ty::GenericPredicates { + Lrc::new(ty::GenericPredicates { parent: None, predicates: superbounds, - } + }) } fn trait_def<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> &'tcx ty::TraitDef { @@ -1590,28 +1607,34 @@ fn early_bound_lifetimes_from_generics<'a, 'tcx>( fn predicates_defined_on<'a, 'tcx>( tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId, -) -> ty::GenericPredicates<'tcx> { +) -> Lrc> { debug!("predicates_defined_on({:?})", def_id); - let explicit = tcx.explicit_predicates_of(def_id); - let span = tcx.def_span(def_id); - let predicates = explicit.predicates.into_iter().chain( - tcx.inferred_outlives_of(def_id).iter().map(|&p| (p, span)) - ).collect(); - - ty::GenericPredicates { - parent: explicit.parent, - predicates: predicates, + let mut result = tcx.explicit_predicates_of(def_id); + debug!( + "predicates_defined_on: explicit_predicates_of({:?}) = {:?}", + def_id, + result, + ); + let inferred_outlives = tcx.inferred_outlives_of(def_id); + if !inferred_outlives.is_empty() { + let span = tcx.def_span(def_id); + debug!( + "predicates_defined_on: inferred_outlives_of({:?}) = {:?}", + def_id, + inferred_outlives, + ); + Lrc::make_mut(&mut result) + .predicates + .extend(inferred_outlives.iter().map(|&p| (p, span))); } + result } fn predicates_of<'a, 'tcx>( tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId, -) -> ty::GenericPredicates<'tcx> { - let ty::GenericPredicates { - parent, - mut predicates, - } = tcx.predicates_defined_on(def_id); +) -> Lrc> { + let mut result = tcx.predicates_defined_on(def_id); if tcx.is_trait(def_id) { // For traits, add `Self: Trait` predicate. This is @@ -1627,16 +1650,17 @@ fn predicates_of<'a, 'tcx>( // used, and adding the predicate into this list ensures // that this is done. let span = tcx.def_span(def_id); - predicates.push((ty::TraitRef::identity(tcx, def_id).to_predicate(), span)); + Lrc::make_mut(&mut result) + .predicates + .push((ty::TraitRef::identity(tcx, def_id).to_predicate(), span)); } - - ty::GenericPredicates { parent, predicates } + result } fn explicit_predicates_of<'a, 'tcx>( tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId, -) -> ty::GenericPredicates<'tcx> { +) -> Lrc> { use rustc::hir::*; use rustc_data_structures::fx::FxHashSet; @@ -1679,6 +1703,7 @@ fn explicit_predicates_of<'a, 'tcx>( let icx = ItemCtxt::new(tcx, def_id); let no_generics = hir::Generics::empty(); + let empty_trait_items = HirVec::new(); let mut predicates = UniquePredicates::new(); @@ -1723,6 +1748,10 @@ fn explicit_predicates_of<'a, 'tcx>( is_trait = Some((ty::TraitRef::identity(tcx, def_id), items)); generics } + ItemKind::TraitAlias(ref generics, _) => { + is_trait = Some((ty::TraitRef::identity(tcx, def_id), &empty_trait_items)); + generics + } ItemKind::Existential(ExistTy { ref bounds, impl_trait_fn, @@ -1742,10 +1771,10 @@ fn explicit_predicates_of<'a, 'tcx>( if impl_trait_fn.is_some() { // impl Trait - return ty::GenericPredicates { + return Lrc::new(ty::GenericPredicates { parent: None, predicates: bounds.predicates(tcx, opaque_ty), - }; + }); } else { // named existential types predicates.extend(bounds.predicates(tcx, opaque_ty)); @@ -1775,7 +1804,7 @@ fn explicit_predicates_of<'a, 'tcx>( // on a trait we need to add in the supertrait bounds and bounds found on // associated types. 
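A recurring pattern in the hunks above is that predicate queries now return `Lrc<ty::GenericPredicates<'tcx>>` and extend the result through `Lrc::make_mut`, which clones the payload only when it is shared. A standalone, hedged sketch of that copy-on-write behaviour using `std::rc::Rc` (in rustc, `Lrc` is an alias for `Rc` or `Arc` depending on whether parallel queries are enabled):

```
use std::rc::Rc;

#[derive(Clone, Debug)]
struct Predicates {
    parent: Option<String>,
    predicates: Vec<String>,
}

fn main() {
    let cached = Rc::new(Predicates {
        parent: None,
        predicates: vec!["Self: Trait".to_string()],
    });

    // Another consumer (say, a query cache) holds on to the same allocation.
    let shared = Rc::clone(&cached);

    // `make_mut` sees that the value is shared, clones it, and hands back a
    // mutable reference to the private copy, leaving `shared` untouched.
    // This is what lets code like `predicates_defined_on` append inferred
    // outlives bounds without disturbing the cached result.
    let mut extended = Rc::clone(&cached);
    Rc::make_mut(&mut extended)
        .predicates
        .push("T: 'static".to_string());

    assert_eq!(shared.predicates.len(), 1);
    assert_eq!(extended.predicates.len(), 2);
    println!("shared = {:?}\nextended = {:?}", shared, extended);
}
```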
if let Some((_trait_ref, _)) = is_trait { - predicates.extend(tcx.super_predicates_of(def_id).predicates); + predicates.extend(tcx.super_predicates_of(def_id).predicates.iter().cloned()); } // In default impls, we can assume that the self type implements @@ -1838,8 +1867,9 @@ fn explicit_predicates_of<'a, 'tcx>( &hir::WherePredicate::BoundPredicate(ref bound_pred) => { let ty = icx.to_ty(&bound_pred.bounded_ty); - // Keep the type around in a WF predicate, in case of no bounds. - // That way, `where Ty:` is not a complete noop (see #53696). + // Keep the type around in a dummy predicate, in case of no bounds. + // That way, `where Ty:` is not a complete noop (see #53696) and `Ty` + // is still checked for WF. if bound_pred.bounds.is_empty() { if let ty::Param(_) = ty.sty { // This is a `where T:`, which can be in the HIR from the @@ -1850,7 +1880,10 @@ fn explicit_predicates_of<'a, 'tcx>( // compiler/tooling bugs from not handling WF predicates. } else { let span = bound_pred.bounded_ty.span; - predicates.push((ty::Predicate::WellFormed(ty), span)); + let predicate = ty::OutlivesPredicate(ty, tcx.mk_region(ty::ReEmpty)); + predicates.push( + (ty::Predicate::TypeOutlives(ty::Binder::dummy(predicate)), span) + ); } } @@ -1859,7 +1892,7 @@ fn explicit_predicates_of<'a, 'tcx>( &hir::GenericBound::Trait(ref poly_trait_ref, _) => { let mut projections = Vec::new(); - let trait_ref = AstConv::instantiate_poly_trait_ref( + let (trait_ref, _) = AstConv::instantiate_poly_trait_ref( &icx, poly_trait_ref, ty, @@ -1948,10 +1981,10 @@ fn explicit_predicates_of<'a, 'tcx>( ); } - ty::GenericPredicates { + Lrc::new(ty::GenericPredicates { parent: generics.parent, predicates, - } + }) } pub enum SizedByDefault { @@ -1959,9 +1992,9 @@ pub enum SizedByDefault { No, } -/// Translate the AST's notion of ty param bounds (which are an enum consisting of a newtyped Ty or -/// a region) to ty's notion of ty param bounds, which can either be user-defined traits, or the -/// built-in trait (formerly known as kind): Send. +/// Translate the AST's notion of ty param bounds (which are an enum consisting of a newtyped `Ty` +/// or a region) to ty's notion of ty param bounds, which can either be user-defined traits, or the +/// built-in trait `Send`. pub fn compute_bounds<'gcx: 'tcx, 'tcx>( astconv: &dyn AstConv<'gcx, 'tcx>, param_ty: Ty<'tcx>, @@ -1969,8 +2002,8 @@ pub fn compute_bounds<'gcx: 'tcx, 'tcx>( sized_by_default: SizedByDefault, span: Span, ) -> Bounds<'tcx> { - let mut region_bounds = vec![]; - let mut trait_bounds = vec![]; + let mut region_bounds = Vec::new(); + let mut trait_bounds = Vec::new(); for ast_bound in ast_bounds { match *ast_bound { @@ -1980,10 +2013,15 @@ pub fn compute_bounds<'gcx: 'tcx, 'tcx>( } } - let mut projection_bounds = vec![]; + let mut projection_bounds = Vec::new(); let mut trait_bounds: Vec<_> = trait_bounds.iter().map(|&bound| { - (astconv.instantiate_poly_trait_ref(bound, param_ty, &mut projection_bounds), bound.span) + let (poly_trait_ref, _) = astconv.instantiate_poly_trait_ref( + bound, + param_ty, + &mut projection_bounds, + ); + (poly_trait_ref, bound.span) }).collect(); let region_bounds = region_bounds @@ -2011,10 +2049,10 @@ pub fn compute_bounds<'gcx: 'tcx, 'tcx>( } } -/// Converts a specific GenericBound from the AST into a set of +/// Converts a specific `GenericBound` from the AST into a set of /// predicates that apply to the self-type. 
A vector is returned -/// because this can be anywhere from 0 predicates (`T:?Sized` adds no -/// predicates) to 1 (`T:Foo`) to many (`T:Bar` adds `T:Bar` +/// because this can be anywhere from zero predicates (`T : ?Sized` adds no +/// predicates) to one (`T : Foo`) to many (`T : Bar` adds `T : Bar` /// and `::X == i32`). fn predicates_from_bound<'tcx>( astconv: &dyn AstConv<'tcx, 'tcx>, @@ -2024,7 +2062,7 @@ fn predicates_from_bound<'tcx>( match *bound { hir::GenericBound::Trait(ref tr, hir::TraitBoundModifier::None) => { let mut projections = Vec::new(); - let pred = astconv.instantiate_poly_trait_ref(tr, param_ty, &mut projections); + let (pred, _) = astconv.instantiate_poly_trait_ref(tr, param_ty, &mut projections); iter::once((pred.to_predicate(), tr.span)).chain( projections .into_iter() diff --git a/src/librustc_typeck/diagnostics.rs b/src/librustc_typeck/diagnostics.rs index f57d050fa2..084951f4a2 100644 --- a/src/librustc_typeck/diagnostics.rs +++ b/src/librustc_typeck/diagnostics.rs @@ -538,7 +538,7 @@ fn main() { let foo = Foo; let ref_foo = &&Foo; - // error, reached the recursion limit while auto-dereferencing &&Foo + // error, reached the recursion limit while auto-dereferencing `&&Foo` ref_foo.foo(); } ``` @@ -3084,6 +3084,66 @@ containing the unsized type is the last and only unsized type field in the struct. "##, +E0378: r##" +The `DispatchFromDyn` trait currently can only be implemented for +builtin pointer types and structs that are newtype wrappers around them +— that is, the struct must have only one field (except for`PhantomData`), +and that field must itself implement `DispatchFromDyn`. + +Examples: + +``` +#![feature(dispatch_from_dyn, unsize)] +use std::{ + marker::Unsize, + ops::DispatchFromDyn, +}; + +struct Ptr(*const T); + +impl DispatchFromDyn> for Ptr +where + T: Unsize, +{} +``` + +``` +#![feature(dispatch_from_dyn)] +use std::{ + ops::DispatchFromDyn, + marker::PhantomData, +}; + +struct Wrapper { + ptr: T, + _phantom: PhantomData<()>, +} + +impl DispatchFromDyn> for Wrapper +where + T: DispatchFromDyn, +{} +``` + +Example of illegal `DispatchFromDyn` implementation +(illegal because of extra field) + +```compile-fail,E0378 +#![feature(dispatch_from_dyn)] +use std::ops::DispatchFromDyn; + +struct WrapperExtraField { + ptr: T, + extra_stuff: i32, +} + +impl DispatchFromDyn> for WrapperExtraField +where + T: DispatchFromDyn, +{} +``` +"##, + E0390: r##" You tried to implement methods for a primitive type. Erroneous code example: @@ -4849,4 +4909,5 @@ register_diagnostics! { E0641, // cannot cast to/from a pointer with an unknown kind E0645, // trait aliases not finished E0698, // type inside generator must be known in this context + E0719, // duplicate values for associated type binding } diff --git a/src/librustc_typeck/impl_wf_check.rs b/src/librustc_typeck/impl_wf_check.rs index edf3ddf7bd..74a53f7fca 100644 --- a/src/librustc_typeck/impl_wf_check.rs +++ b/src/librustc_typeck/impl_wf_check.rs @@ -62,7 +62,7 @@ pub fn impl_wf_check<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { // We will tag this as part of the WF check -- logically, it is, // but it's one that we must perform earlier than the rest of // WfCheck. 
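The `register_diagnostics!` hunk above reserves `E0719` for duplicate values in an associated type binding. For orientation, a hedged compile-fail sketch of the situation that error code covers (the trait here is invented, and the exact diagnostic wording is not part of this diff):

```
trait Stream {
    type Item;
}

// Specifying `Item` twice in the same argument list is the case E0719 is
// reserved for; a compiler with this change is expected to reject it.
fn take(_stream: Box<dyn Stream<Item = u32, Item = i32>>) {}

fn main() {}
```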
- tcx.hir.krate().visit_all_item_likes(&mut ImplWfCheck { tcx: tcx }); + tcx.hir.krate().visit_all_item_likes(&mut ImplWfCheck { tcx }); } struct ImplWfCheck<'a, 'tcx: 'a> { diff --git a/src/librustc_typeck/lib.rs b/src/librustc_typeck/lib.rs index 1f5998d8ca..0fba311d7f 100644 --- a/src/librustc_typeck/lib.rs +++ b/src/librustc_typeck/lib.rs @@ -389,7 +389,7 @@ pub fn hir_trait_to_predicates<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, hir_trait: let env_def_id = tcx.hir.local_def_id(env_node_id); let item_cx = self::collect::ItemCtxt::new(tcx, env_def_id); let mut projections = Vec::new(); - let principal = astconv::AstConv::instantiate_poly_trait_ref_inner( + let (principal, _) = astconv::AstConv::instantiate_poly_trait_ref_inner( &item_cx, hir_trait, tcx.types.err, &mut projections, true ); diff --git a/src/librustc_typeck/outlives/explicit.rs b/src/librustc_typeck/outlives/explicit.rs index 75f8b78b9e..9b374cf932 100644 --- a/src/librustc_typeck/outlives/explicit.rs +++ b/src/librustc_typeck/outlives/explicit.rs @@ -33,14 +33,14 @@ impl<'tcx> ExplicitPredicatesMap<'tcx> { ) -> &RequiredPredicates<'tcx> { self.map.entry(def_id).or_insert_with(|| { let predicates = if def_id.is_local() { - tcx.explicit_predicates_of(def_id).predicates + tcx.explicit_predicates_of(def_id) } else { - tcx.predicates_of(def_id).predicates + tcx.predicates_of(def_id) }; let mut required_predicates = RequiredPredicates::default(); // process predicates and convert to `RequiredPredicates` entry, see below - for (pred, _) in predicates.into_iter() { + for (pred, _) in predicates.predicates.iter() { match pred { ty::Predicate::TypeOutlives(predicate) => { let OutlivesPredicate(ref ty, ref reg) = predicate.skip_binder(); diff --git a/src/librustc_typeck/outlives/utils.rs b/src/librustc_typeck/outlives/utils.rs index 96b75c4792..6ed59837eb 100644 --- a/src/librustc_typeck/outlives/utils.rs +++ b/src/librustc_typeck/outlives/utils.rs @@ -11,6 +11,7 @@ use rustc::ty::outlives::Component; use rustc::ty::subst::{Kind, UnpackedKind}; use rustc::ty::{self, Region, RegionKind, Ty, TyCtxt}; +use smallvec::smallvec; use std::collections::BTreeSet; /// Tracks the `T: 'a` or `'a: 'a` predicates that we have inferred @@ -40,7 +41,9 @@ pub fn insert_outlives_predicate<'tcx>( // // Or if within `struct Foo` you had `T = Vec`, then // we would want to add `U: 'outlived_region` - for component in tcx.outlives_components(ty) { + let mut components = smallvec![]; + tcx.push_outlives_components(ty, &mut components); + for component in components { match component { Component::Region(r) => { // This would arise from something like: @@ -167,7 +170,6 @@ fn is_free_region<'tcx>(tcx: TyCtxt<'_, 'tcx, 'tcx>, region: Region<'_>) -> bool RegionKind::ReEmpty | RegionKind::ReErased | RegionKind::ReClosureBound(..) - | RegionKind::ReCanonical(..) | RegionKind::ReScope(..) | RegionKind::ReVar(..) | RegionKind::RePlaceholder(..) diff --git a/src/librustc_typeck/variance/constraints.rs b/src/librustc_typeck/variance/constraints.rs index 3e523c0c7f..ed32e5a8d9 100644 --- a/src/librustc_typeck/variance/constraints.rs +++ b/src/librustc_typeck/variance/constraints.rs @@ -336,8 +336,10 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> { // types, where we use Error as the Self type } + ty::Placeholder(..) | ty::UnnormalizedProjection(..) | ty::GeneratorWitness(..) | + ty::Bound(..) | ty::Infer(..) 
=> { bug!("unexpected type encountered in \ variance inference: {}", @@ -426,7 +428,6 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> { // way early-bound regions do, so we skip them here. } - ty::ReCanonical(_) | ty::ReFree(..) | ty::ReClosureBound(..) | ty::ReScope(..) | diff --git a/src/librustc_typeck/variance/mod.rs b/src/librustc_typeck/variance/mod.rs index 7cc56bc192..e3c82d50a8 100644 --- a/src/librustc_typeck/variance/mod.rs +++ b/src/librustc_typeck/variance/mod.rs @@ -11,7 +11,7 @@ //! Module for inferring the variance of type and lifetime parameters. See the [rustc guide] //! chapter for more info. //! -//! [rustc guide]: https://rust-lang-nursery.github.io/rustc-guide/variance.html +//! [rustc guide]: https://rust-lang.github.io/rustc-guide/variance.html use arena; use rustc::hir; diff --git a/src/librustc_typeck/variance/terms.rs b/src/librustc_typeck/variance/terms.rs index 0aec31609b..3692221a3f 100644 --- a/src/librustc_typeck/variance/terms.rs +++ b/src/librustc_typeck/variance/terms.rs @@ -81,7 +81,7 @@ pub fn determine_parameters_to_be_inferred<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx> let mut terms_cx = TermsContext { tcx, arena, - inferred_starts: NodeMap(), + inferred_starts: Default::default(), inferred_terms: vec![], lang_items: lang_items(tcx), @@ -89,8 +89,8 @@ pub fn determine_parameters_to_be_inferred<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx> // See the following for a discussion on dep-graph management. // - // - https://rust-lang-nursery.github.io/rustc-guide/query.html - // - https://rust-lang-nursery.github.io/rustc-guide/variance.html + // - https://rust-lang.github.io/rustc-guide/query.html + // - https://rust-lang.github.io/rustc-guide/variance.html tcx.hir.krate().visit_all_item_likes(&mut terms_cx); terms_cx diff --git a/src/librustdoc/README.md b/src/librustdoc/README.md index 2cfe43a838..e4f7bc30e3 100644 --- a/src/librustdoc/README.md +++ b/src/librustdoc/README.md @@ -1,3 +1,3 @@ For more information about how `librustdoc` works, see the [rustc guide]. -[rustc guide]: https://rust-lang-nursery.github.io/rustc-guide/rustdoc.html +[rustc guide]: https://rust-lang.github.io/rustc-guide/rustdoc.html diff --git a/src/librustdoc/clean/mod.rs b/src/librustdoc/clean/mod.rs index 4281ccf5f6..fd8f70b19e 100644 --- a/src/librustdoc/clean/mod.rs +++ b/src/librustdoc/clean/mod.rs @@ -38,10 +38,12 @@ use rustc::hir::def::{self, Def, CtorKind}; use rustc::hir::def_id::{CrateNum, DefId, CRATE_DEF_INDEX, LOCAL_CRATE}; use rustc::ty::subst::Substs; use rustc::ty::{self, TyCtxt, Region, RegionVid, Ty, AdtKind}; +use rustc::ty::layout::VariantIdx; use rustc::middle::stability; use rustc::util::nodemap::{FxHashMap, FxHashSet}; use rustc_typeck::hir_ty_to_ty; use rustc::infer::region_constraints::{RegionConstraintData, Constraint}; +use rustc_data_structures::indexed_vec::{IndexVec, Idx}; use std::collections::hash_map::Entry; use std::fmt; @@ -98,6 +100,12 @@ impl, U> Clean> for [T] { } } +impl, U, V: Idx> Clean> for IndexVec { + fn clean(&self, cx: &DocContext) -> IndexVec { + self.iter().map(|x| x.clean(cx)).collect() + } +} + impl, U> Clean for P { fn clean(&self, cx: &DocContext) -> U { (**self).clean(cx) @@ -552,6 +560,14 @@ impl ItemEnum { _ => return None, }) } + + pub fn is_associated(&self) -> bool { + match *self { + ItemEnum::TypedefItem(_, _) | + ItemEnum::AssociatedTypeItem(_, _) => true, + _ => false, + } + } } #[derive(Clone, RustcEncodable, RustcDecodable, Debug)] @@ -1260,7 +1276,6 @@ impl Clean> for ty::RegionKind { ty::RePlaceholder(..) 
| ty::ReEmpty | ty::ReClosureBound(_) | - ty::ReCanonical(_) | ty::ReErased => None } } @@ -1310,15 +1325,10 @@ impl<'a> Clean for ty::Predicate<'a> { Predicate::RegionOutlives(ref pred) => pred.clean(cx), Predicate::TypeOutlives(ref pred) => pred.clean(cx), Predicate::Projection(ref pred) => pred.clean(cx), - Predicate::WellFormed(ty) => { - // This comes from `where Ty:` (i.e. no bounds) (see #53696). - WherePredicate::BoundPredicate { - ty: ty.clean(cx), - bounds: vec![], - } - } - Predicate::ObjectSafe(_) => panic!("not user writable"), - Predicate::ClosureKind(..) => panic!("not user writable"), + + Predicate::WellFormed(..) | + Predicate::ObjectSafe(..) | + Predicate::ClosureKind(..) | Predicate::ConstEvaluatable(..) => panic!("not user writable"), } } @@ -1553,7 +1563,7 @@ impl Clean for hir::Generics { } impl<'a, 'tcx> Clean for (&'a ty::Generics, - &'a ty::GenericPredicates<'tcx>) { + &'a Lrc>) { fn clean(&self, cx: &DocContext) -> Generics { use self::WherePredicate as WP; @@ -2733,6 +2743,8 @@ impl<'tcx> Clean for Ty<'tcx> { ty::Closure(..) | ty::Generator(..) => Tuple(vec![]), // FIXME(pcwalton) + ty::Bound(..) => panic!("Bound"), + ty::Placeholder(..) => panic!("Placeholder"), ty::UnnormalizedProjection(..) => panic!("UnnormalizedProjection"), ty::GeneratorWitness(..) => panic!("GeneratorWitness"), ty::Infer(..) => panic!("Infer"), @@ -2878,7 +2890,7 @@ impl Clean for ::rustc::hir::VariantData { #[derive(Clone, RustcEncodable, RustcDecodable, Debug)] pub struct Enum { - pub variants: Vec, + pub variants: IndexVec, pub generics: Generics, pub variants_stripped: bool, } @@ -2894,7 +2906,7 @@ impl Clean for doctree::Enum { stability: self.stab.clean(cx), deprecation: self.depr.clean(cx), inner: EnumItem(Enum { - variants: self.variants.clean(cx), + variants: self.variants.iter().map(|v| v.clean(cx)).collect(), generics: self.generics.clean(cx), variants_stripped: false, }), @@ -2958,7 +2970,7 @@ impl<'tcx> Clean for ty::VariantDef { source: cx.tcx.def_span(self.did).clean(cx), visibility: Some(Inherited), def_id: self.did, - inner: VariantItem(Variant { kind: kind }), + inner: VariantItem(Variant { kind }), stability: get_stability(cx, self.did), deprecation: get_deprecation(cx, self.did), } diff --git a/src/librustdoc/clean/simplify.rs b/src/librustdoc/clean/simplify.rs index eda522af92..635608d140 100644 --- a/src/librustdoc/clean/simplify.rs +++ b/src/librustdoc/clean/simplify.rs @@ -156,8 +156,8 @@ fn trait_is_same_or_supertrait(cx: &DocContext, child: DefId, if child == trait_ { return true } - let predicates = cx.tcx.super_predicates_of(child).predicates; - predicates.iter().filter_map(|(pred, _)| { + let predicates = cx.tcx.super_predicates_of(child); + predicates.predicates.iter().filter_map(|(pred, _)| { if let ty::Predicate::Trait(ref pred) = *pred { if pred.skip_binder().trait_ref.self_ty().is_self() { Some(pred.def_id()) diff --git a/src/librustdoc/config.rs b/src/librustdoc/config.rs new file mode 100644 index 0000000000..f4d05c6dbd --- /dev/null +++ b/src/librustdoc/config.rs @@ -0,0 +1,561 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
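On the rustdoc side, `Enum::variants` above becomes an `IndexVec` keyed by the compiler's `VariantIdx`, and a blanket `Clean` impl is added for `IndexVec`, so cleaned variants stay addressable by the same index type the compiler uses. A hedged, self-contained sketch of the newtype-index idea behind `IndexVec` (a toy stand-in; the real `Idx` and `IndexVec` live in `rustc_data_structures::indexed_vec`):

```
use std::marker::PhantomData;

// A dedicated index newtype: a `VariantIdx` can never be mixed up with a
// plain `usize` or with an index into some other table.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
struct VariantIdx(usize);

// A vector that can only be indexed by that newtype.
struct IndexVec<I, T> {
    raw: Vec<T>,
    _marker: PhantomData<I>,
}

impl<T> IndexVec<VariantIdx, T> {
    fn new() -> Self {
        IndexVec { raw: Vec::new(), _marker: PhantomData }
    }
    fn push(&mut self, value: T) -> VariantIdx {
        self.raw.push(value);
        VariantIdx(self.raw.len() - 1)
    }
    fn get(&self, index: VariantIdx) -> Option<&T> {
        self.raw.get(index.0)
    }
}

fn main() {
    let mut variants = IndexVec::new();
    let first = variants.push("Unit");
    let second = variants.push("Tuple(u32)");
    assert_ne!(first, second);
    println!("{:?} -> {:?}", second, variants.get(second));
}
```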
+ +use std::collections::{BTreeMap, BTreeSet}; +use std::fmt; +use std::path::PathBuf; + +use errors; +use errors::emitter::ColorConfig; +use getopts; +use rustc::lint::Level; +use rustc::session::early_error; +use rustc::session::config::{CodegenOptions, DebuggingOptions, ErrorOutputType, Externs}; +use rustc::session::config::{nightly_options, build_codegen_options, build_debugging_options, + get_cmd_lint_options}; +use rustc::session::search_paths::SearchPaths; +use rustc_driver; +use rustc_target::spec::TargetTriple; +use syntax::edition::Edition; + +use core::new_handler; +use externalfiles::ExternalHtml; +use html; +use html::markdown::IdMap; +use html::static_files; +use opts; +use passes::{self, DefaultPassOption}; +use theme; + +/// Configuration options for rustdoc. +#[derive(Clone)] +pub struct Options { + // Basic options / Options passed directly to rustc + + /// The crate root or Markdown file to load. + pub input: PathBuf, + /// The name of the crate being documented. + pub crate_name: Option, + /// How to format errors and warnings. + pub error_format: ErrorOutputType, + /// Library search paths to hand to the compiler. + pub libs: SearchPaths, + /// The list of external crates to link against. + pub externs: Externs, + /// List of `cfg` flags to hand to the compiler. Always includes `rustdoc`. + pub cfgs: Vec, + /// Codegen options to hand to the compiler. + pub codegen_options: CodegenOptions, + /// Debugging (`-Z`) options to pass to the compiler. + pub debugging_options: DebuggingOptions, + /// The target used to compile the crate against. + pub target: Option, + /// Edition used when reading the crate. Defaults to "2015". Also used by default when + /// compiling doctests from the crate. + pub edition: Edition, + /// The path to the sysroot. Used during the compilation process. + pub maybe_sysroot: Option, + /// Linker to use when building doctests. + pub linker: Option, + /// Lint information passed over the command-line. + pub lint_opts: Vec<(String, Level)>, + /// Whether to ask rustc to describe the lints it knows. Practically speaking, this will not be + /// used, since we abort if we have no input file, but it's included for completeness. + pub describe_lints: bool, + /// What level to cap lints at. + pub lint_cap: Option, + + // Options specific to running doctests + + /// Whether we should run doctests instead of generating docs. + pub should_test: bool, + /// List of arguments to pass to the test harness, if running tests. + pub test_args: Vec, + + // Options that affect the documentation process + + /// The selected default set of passes to use. + /// + /// Be aware: This option can come both from the CLI and from crate attributes! + pub default_passes: DefaultPassOption, + /// Any passes manually selected by the user. + /// + /// Be aware: This option can come both from the CLI and from crate attributes! + pub manual_passes: Vec, + /// Whether to display warnings during doc generation or while gathering doctests. By default, + /// all non-rustdoc-specific lints are allowed when generating docs. + pub display_warnings: bool, + + // Options that alter generated documentation pages + + /// Crate version to note on the sidebar of generated docs. + pub crate_version: Option, + /// Collected options specific to outputting final pages. 
+ pub render_options: RenderOptions, +} + +impl fmt::Debug for Options { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + struct FmtExterns<'a>(&'a Externs); + + impl<'a> fmt::Debug for FmtExterns<'a> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_map() + .entries(self.0.iter()) + .finish() + } + } + + f.debug_struct("Options") + .field("input", &self.input) + .field("crate_name", &self.crate_name) + .field("error_format", &self.error_format) + .field("libs", &self.libs) + .field("externs", &FmtExterns(&self.externs)) + .field("cfgs", &self.cfgs) + .field("codegen_options", &"...") + .field("debugging_options", &"...") + .field("target", &self.target) + .field("edition", &self.edition) + .field("maybe_sysroot", &self.maybe_sysroot) + .field("linker", &self.linker) + .field("lint_opts", &self.lint_opts) + .field("describe_lints", &self.describe_lints) + .field("lint_cap", &self.lint_cap) + .field("should_test", &self.should_test) + .field("test_args", &self.test_args) + .field("default_passes", &self.default_passes) + .field("manual_passes", &self.manual_passes) + .field("display_warnings", &self.display_warnings) + .field("crate_version", &self.crate_version) + .field("render_options", &self.render_options) + .finish() + } +} + +/// Configuration options for the HTML page-creation process. +#[derive(Clone, Debug)] +pub struct RenderOptions { + /// Output directory to generate docs into. Defaults to `doc`. + pub output: PathBuf, + /// External files to insert into generated pages. + pub external_html: ExternalHtml, + /// A pre-populated `IdMap` with the default headings and any headings added by Markdown files + /// processed by `external_html`. + pub id_map: IdMap, + /// If present, playground URL to use in the "Run" button added to code samples. + /// + /// Be aware: This option can come both from the CLI and from crate attributes! + pub playground_url: Option, + /// Whether to sort modules alphabetically on a module page instead of using declaration order. + /// `true` by default. + /// + /// FIXME(misdreavus): the flag name is `--sort-modules-by-appearance` but the meaning is + /// inverted once read + pub sort_modules_alphabetically: bool, + /// List of themes to extend the docs with. Original argument name is included to assist in + /// displaying errors if it fails a theme check. + pub themes: Vec, + /// If present, CSS file that contains rules to add to the default CSS. + pub extension_css: Option, + /// A map of crate names to the URL to use instead of querying the crate's `html_root_url`. + pub extern_html_root_urls: BTreeMap, + /// If present, suffix added to CSS/JavaScript files when referencing them in generated pages. + pub resource_suffix: String, + /// Whether to run the static CSS/JavaScript through a minifier when outputting them. `true` by + /// default. + /// + /// FIXME(misdreavus): the flag name is `--disable-minification` but the meaning is inverted + /// once read + pub enable_minification: bool, + /// Whether to create an index page in the root of the output directory. If this is true but + /// `enable_index_page` is None, generate a static listing of crates instead. + pub enable_index_page: bool, + /// A file to use as the index page at the root of the output directory. Overrides + /// `enable_index_page` to be true if set. + pub index_page: Option, + + // Options specific to reading standalone Markdown files + + /// Whether to generate a table of contents on the output file when reading a standalone + /// Markdown file. 
+ pub markdown_no_toc: bool, + /// Additional CSS files to link in pages generated from standalone Markdown files. + pub markdown_css: Vec, + /// If present, playground URL to use in the "Run" button added to code samples generated from + /// standalone Markdown files. If not present, `playground_url` is used. + pub markdown_playground_url: Option, +} + +impl Options { + /// Parses the given command-line for options. If an error message or other early-return has + /// been printed, returns `Err` with the exit code. + pub fn from_matches(matches: &getopts::Matches) -> Result { + // Check for unstable options. + nightly_options::check_nightly_options(&matches, &opts()); + + if matches.opt_present("h") || matches.opt_present("help") { + ::usage("rustdoc"); + return Err(0); + } else if matches.opt_present("version") { + rustc_driver::version("rustdoc", &matches); + return Err(0); + } + + if matches.opt_strs("passes") == ["list"] { + println!("Available passes for running rustdoc:"); + for pass in passes::PASSES { + println!("{:>20} - {}", pass.name(), pass.description()); + } + println!("\nDefault passes for rustdoc:"); + for &name in passes::DEFAULT_PASSES { + println!("{:>20}", name); + } + println!("\nPasses run with `--document-private-items`:"); + for &name in passes::DEFAULT_PRIVATE_PASSES { + println!("{:>20}", name); + } + return Err(0); + } + + let color = match matches.opt_str("color").as_ref().map(|s| &s[..]) { + Some("auto") => ColorConfig::Auto, + Some("always") => ColorConfig::Always, + Some("never") => ColorConfig::Never, + None => ColorConfig::Auto, + Some(arg) => { + early_error(ErrorOutputType::default(), + &format!("argument for --color must be `auto`, `always` or `never` \ + (instead was `{}`)", arg)); + } + }; + let error_format = match matches.opt_str("error-format").as_ref().map(|s| &s[..]) { + Some("human") => ErrorOutputType::HumanReadable(color), + Some("json") => ErrorOutputType::Json(false), + Some("pretty-json") => ErrorOutputType::Json(true), + Some("short") => ErrorOutputType::Short(color), + None => ErrorOutputType::HumanReadable(color), + Some(arg) => { + early_error(ErrorOutputType::default(), + &format!("argument for --error-format must be `human`, `json` or \ + `short` (instead was `{}`)", arg)); + } + }; + + let codegen_options = build_codegen_options(matches, error_format); + let debugging_options = build_debugging_options(matches, error_format); + + let diag = new_handler(error_format, + None, + debugging_options.treat_err_as_bug, + debugging_options.ui_testing); + + // check for deprecated options + check_deprecated_options(&matches, &diag); + + let to_check = matches.opt_strs("theme-checker"); + if !to_check.is_empty() { + let paths = theme::load_css_paths(static_files::themes::LIGHT.as_bytes()); + let mut errors = 0; + + println!("rustdoc: [theme-checker] Starting tests!"); + for theme_file in to_check.iter() { + print!(" - Checking \"{}\"...", theme_file); + let (success, differences) = theme::test_theme_against(theme_file, &paths, &diag); + if !differences.is_empty() || !success { + println!(" FAILED"); + errors += 1; + if !differences.is_empty() { + println!("{}", differences.join("\n")); + } + } else { + println!(" OK"); + } + } + if errors != 0 { + return Err(1); + } + return Err(0); + } + + if matches.free.is_empty() { + diag.struct_err("missing file operand").emit(); + return Err(1); + } + if matches.free.len() > 1 { + diag.struct_err("too many file operands").emit(); + return Err(1); + } + let input = PathBuf::from(&matches.free[0]); + + let 
mut libs = SearchPaths::new(); + for s in &matches.opt_strs("L") { + libs.add_path(s, error_format); + } + let externs = match parse_externs(&matches) { + Ok(ex) => ex, + Err(err) => { + diag.struct_err(&err).emit(); + return Err(1); + } + }; + let extern_html_root_urls = match parse_extern_html_roots(&matches) { + Ok(ex) => ex, + Err(err) => { + diag.struct_err(err).emit(); + return Err(1); + } + }; + + let test_args = matches.opt_strs("test-args"); + let test_args: Vec = test_args.iter() + .flat_map(|s| s.split_whitespace()) + .map(|s| s.to_string()) + .collect(); + + let should_test = matches.opt_present("test"); + + let output = matches.opt_str("o") + .map(|s| PathBuf::from(&s)) + .unwrap_or_else(|| PathBuf::from("doc")); + let mut cfgs = matches.opt_strs("cfg"); + cfgs.push("rustdoc".to_string()); + + let extension_css = matches.opt_str("e").map(|s| PathBuf::from(&s)); + + if let Some(ref p) = extension_css { + if !p.is_file() { + diag.struct_err("option --extend-css argument must be a file").emit(); + return Err(1); + } + } + + let mut themes = Vec::new(); + if matches.opt_present("themes") { + let paths = theme::load_css_paths(static_files::themes::LIGHT.as_bytes()); + + for (theme_file, theme_s) in matches.opt_strs("themes") + .iter() + .map(|s| (PathBuf::from(&s), s.to_owned())) { + if !theme_file.is_file() { + diag.struct_err("option --themes arguments must all be files").emit(); + return Err(1); + } + let (success, ret) = theme::test_theme_against(&theme_file, &paths, &diag); + if !success || !ret.is_empty() { + diag.struct_err(&format!("invalid theme: \"{}\"", theme_s)) + .help("check what's wrong with the --theme-checker option") + .emit(); + return Err(1); + } + themes.push(theme_file); + } + } + + let mut id_map = html::markdown::IdMap::new(); + id_map.populate(html::render::initial_ids()); + let external_html = match ExternalHtml::load( + &matches.opt_strs("html-in-header"), + &matches.opt_strs("html-before-content"), + &matches.opt_strs("html-after-content"), + &matches.opt_strs("markdown-before-content"), + &matches.opt_strs("markdown-after-content"), &diag, &mut id_map) { + Some(eh) => eh, + None => return Err(3), + }; + + let edition = matches.opt_str("edition").unwrap_or("2015".to_string()); + let edition = match edition.parse() { + Ok(e) => e, + Err(_) => { + diag.struct_err("could not parse edition").emit(); + return Err(1); + } + }; + + match matches.opt_str("r").as_ref().map(|s| &**s) { + Some("rust") | None => {} + Some(s) => { + diag.struct_err(&format!("unknown input format: {}", s)).emit(); + return Err(1); + } + } + + match matches.opt_str("w").as_ref().map(|s| &**s) { + Some("html") | None => {} + Some(s) => { + diag.struct_err(&format!("unknown output format: {}", s)).emit(); + return Err(1); + } + } + + let index_page = matches.opt_str("index-page").map(|s| PathBuf::from(&s)); + if let Some(ref index_page) = index_page { + if !index_page.is_file() { + diag.struct_err("option `--index-page` argument must be a file").emit(); + return Err(1); + } + } + + let target = matches.opt_str("target").map(|target| { + if target.ends_with(".json") { + TargetTriple::TargetPath(PathBuf::from(target)) + } else { + TargetTriple::TargetTriple(target) + } + }); + + let default_passes = if matches.opt_present("no-defaults") { + passes::DefaultPassOption::None + } else if matches.opt_present("document-private-items") { + passes::DefaultPassOption::Private + } else { + passes::DefaultPassOption::Default + }; + let manual_passes = matches.opt_strs("passes"); + + let crate_name = 
matches.opt_str("crate-name"); + let playground_url = matches.opt_str("playground-url"); + let maybe_sysroot = matches.opt_str("sysroot").map(PathBuf::from); + let display_warnings = matches.opt_present("display-warnings"); + let linker = matches.opt_str("linker").map(PathBuf::from); + let sort_modules_alphabetically = !matches.opt_present("sort-modules-by-appearance"); + let resource_suffix = matches.opt_str("resource-suffix").unwrap_or_default(); + let enable_minification = !matches.opt_present("disable-minification"); + let markdown_no_toc = matches.opt_present("markdown-no-toc"); + let markdown_css = matches.opt_strs("markdown-css"); + let markdown_playground_url = matches.opt_str("markdown-playground-url"); + let crate_version = matches.opt_str("crate-version"); + let enable_index_page = matches.opt_present("enable-index-page") || index_page.is_some(); + + let (lint_opts, describe_lints, lint_cap) = get_cmd_lint_options(matches, error_format); + + Ok(Options { + input, + crate_name, + error_format, + libs, + externs, + cfgs, + codegen_options, + debugging_options, + target, + edition, + maybe_sysroot, + linker, + lint_opts, + describe_lints, + lint_cap, + should_test, + test_args, + default_passes, + manual_passes, + display_warnings, + crate_version, + render_options: RenderOptions { + output, + external_html, + id_map, + playground_url, + sort_modules_alphabetically, + themes, + extension_css, + extern_html_root_urls, + resource_suffix, + enable_minification, + enable_index_page, + index_page, + markdown_no_toc, + markdown_css, + markdown_playground_url, + } + }) + } + + /// Returns whether the file given as `self.input` is a Markdown file. + pub fn markdown_input(&self) -> bool { + self.input.extension() + .map_or(false, |e| e == "md" || e == "markdown") + } +} + +/// Prints deprecation warnings for deprecated options +fn check_deprecated_options(matches: &getopts::Matches, diag: &errors::Handler) { + let deprecated_flags = [ + "input-format", + "output-format", + "no-defaults", + "passes", + ]; + + for flag in deprecated_flags.into_iter() { + if matches.opt_present(flag) { + let mut err = diag.struct_warn(&format!("the '{}' flag is considered deprecated", + flag)); + err.warn("please see https://github.com/rust-lang/rust/issues/44136"); + + if *flag == "no-defaults" { + err.help("you may want to use --document-private-items"); + } + + err.emit(); + } + } + + let removed_flags = [ + "plugins", + "plugin-path", + ]; + + for &flag in removed_flags.iter() { + if matches.opt_present(flag) { + diag.struct_warn(&format!("the '{}' flag no longer functions", flag)) + .warn("see CVE-2018-1000622") + .emit(); + } + } +} + +/// Extracts `--extern-html-root-url` arguments from `matches` and returns a map of crate names to +/// the given URLs. If an `--extern-html-root-url` argument was ill-formed, returns an error +/// describing the issue. +fn parse_extern_html_roots( + matches: &getopts::Matches, +) -> Result, &'static str> { + let mut externs = BTreeMap::new(); + for arg in &matches.opt_strs("extern-html-root-url") { + let mut parts = arg.splitn(2, '='); + let name = parts.next().ok_or("--extern-html-root-url must not be empty")?; + let url = parts.next().ok_or("--extern-html-root-url must be of the form name=url")?; + externs.insert(name.to_string(), url.to_string()); + } + + Ok(externs) +} + +/// Extracts `--extern CRATE=PATH` arguments from `matches` and +/// returns a map mapping crate names to their paths or else an +/// error message. 
+// FIXME(eddyb) This shouldn't be duplicated with `rustc::session`. +fn parse_externs(matches: &getopts::Matches) -> Result { + let mut externs: BTreeMap<_, BTreeSet<_>> = BTreeMap::new(); + for arg in &matches.opt_strs("extern") { + let mut parts = arg.splitn(2, '='); + let name = parts.next().ok_or("--extern value must not be empty".to_string())?; + let location = parts.next().map(|s| s.to_string()); + if location.is_none() && !nightly_options::is_unstable_enabled(matches) { + return Err("the `-Z unstable-options` flag must also be passed to \ + enable `--extern crate_name` without `=path`".to_string()); + } + let name = name.to_string(); + externs.entry(name).or_default().insert(location); + } + Ok(Externs::new(externs)) +} diff --git a/src/librustdoc/core.rs b/src/librustdoc/core.rs index d6b0127e44..aac0f9f94e 100644 --- a/src/librustdoc/core.rs +++ b/src/librustdoc/core.rs @@ -28,7 +28,6 @@ use rustc_target::spec::TargetTriple; use syntax::ast::{self, Ident, NodeId}; use syntax::source_map; -use syntax::edition::Edition; use syntax::feature_gate::UnstableFeatures; use syntax::json::JsonEmitter; use syntax::ptr::P; @@ -43,9 +42,9 @@ use std::mem; use rustc_data_structures::sync::{self, Lrc}; use std::rc::Rc; use std::sync::Arc; -use std::path::PathBuf; use visit_ast::RustdocVisitor; +use config::{Options as RustdocOptions, RenderOptions}; use clean; use clean::{get_path_for_type, Clean, MAX_DEF_ID, AttributesExt}; use html::render::RenderInfo; @@ -320,46 +319,49 @@ pub fn new_handler(error_format: ErrorOutputType, ) } -pub fn run_core(search_paths: SearchPaths, - cfgs: Vec, - externs: config::Externs, - input: Input, - triple: Option, - maybe_sysroot: Option, - allow_warnings: bool, - crate_name: Option, - force_unstable_if_unmarked: bool, - edition: Edition, - cg: CodegenOptions, - error_format: ErrorOutputType, - cmd_lints: Vec<(String, lint::Level)>, - lint_cap: Option, - describe_lints: bool, - mut manual_passes: Vec, - mut default_passes: passes::DefaultPassOption, - treat_err_as_bug: bool, - ui_testing: bool, -) -> (clean::Crate, RenderInfo, Vec) { +pub fn run_core(options: RustdocOptions) -> (clean::Crate, RenderInfo, RenderOptions, Vec) { // Parse, resolve, and typecheck the given crate. - let cpath = match input { - Input::File(ref p) => Some(p.clone()), - _ => None - }; + let RustdocOptions { + input, + crate_name, + error_format, + libs, + externs, + cfgs, + codegen_options, + debugging_options, + target, + edition, + maybe_sysroot, + lint_opts, + describe_lints, + lint_cap, + mut default_passes, + mut manual_passes, + display_warnings, + render_options, + .. + } = options; + + let cpath = Some(input.clone()); + let input = Input::File(input); let intra_link_resolution_failure_name = lint::builtin::INTRA_DOC_LINK_RESOLUTION_FAILURE.name; let warnings_lint_name = lint::builtin::WARNINGS.name; let missing_docs = rustc_lint::builtin::MISSING_DOCS.name; let missing_doc_example = rustc_lint::builtin::MISSING_DOC_CODE_EXAMPLES.name; + let private_doc_tests = rustc_lint::builtin::PRIVATE_DOC_TESTS.name; // In addition to those specific lints, we also need to whitelist those given through // command line, otherwise they'll get ignored and we don't want that. 
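
Editorial note (not part of the patch): as the comment above says, rustdoc only wants a handful of lints to fire while documenting, so every known lint that is not whitelisted is forced to `Allow` before the user's own lint options are chained on. A rough standalone sketch of that filtering; `LintLevel` and the function name are invented stand-ins for the compiler's `lint::Level`:

```
#[derive(Clone, Copy, Debug)]
enum LintLevel { Allow, Warn }

fn build_lint_opts(
    all_lints: &[&str],
    whitelisted: &[String],
    user_opts: Vec<(String, LintLevel)>,
) -> Vec<(String, LintLevel)> {
    all_lints
        .iter()
        .filter_map(|lint| {
            if whitelisted.iter().any(|l| l == lint) {
                None // keep the default level for whitelisted lints
            } else {
                Some((lint.to_string(), LintLevel::Allow))
            }
        })
        .chain(user_opts.into_iter())
        .collect()
}

fn main() {
    let opts = build_lint_opts(
        &["missing_docs", "dead_code"],
        &["missing_docs".to_string()],
        vec![("dead_code".to_string(), LintLevel::Warn)],
    );
    println!("{:?}", opts);
}
```
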
let mut whitelisted_lints = vec![warnings_lint_name.to_owned(), intra_link_resolution_failure_name.to_owned(), missing_docs.to_owned(), - missing_doc_example.to_owned()]; + missing_doc_example.to_owned(), + private_doc_tests.to_owned()]; - whitelisted_lints.extend(cmd_lints.iter().map(|(lint, _)| lint).cloned()); + whitelisted_lints.extend(lint_opts.iter().map(|(lint, _)| lint).cloned()); let lints = lint::builtin::HardwiredLints.get_lints() .into_iter() @@ -372,33 +374,28 @@ pub fn run_core(search_paths: SearchPaths, Some((lint.name_lower(), lint::Allow)) } }) - .chain(cmd_lints.into_iter()) + .chain(lint_opts.into_iter()) .collect::>(); let host_triple = TargetTriple::from_triple(config::host_triple()); // plays with error output here! let sessopts = config::Options { maybe_sysroot, - search_paths, + search_paths: libs, crate_types: vec![config::CrateType::Rlib], - lint_opts: if !allow_warnings { + lint_opts: if !display_warnings { lints } else { vec![] }, lint_cap: Some(lint_cap.unwrap_or_else(|| lint::Forbid)), - cg, + cg: codegen_options, externs, - target_triple: triple.unwrap_or(host_triple), + target_triple: target.unwrap_or(host_triple), // Ensure that rustdoc works even if rustc is feature-staged unstable_features: UnstableFeatures::Allow, actually_rustdoc: true, - debugging_opts: config::DebuggingOptions { - force_unstable_if_unmarked, - treat_err_as_bug, - ui_testing, - ..config::basic_debugging_options() - }, + debugging_opts: debugging_options.clone(), error_format, edition, describe_lints, @@ -408,8 +405,8 @@ pub fn run_core(search_paths: SearchPaths, let source_map = Lrc::new(source_map::SourceMap::new(sessopts.file_path_mapping())); let diagnostic_handler = new_handler(error_format, Some(source_map.clone()), - treat_err_as_bug, - ui_testing); + debugging_options.treat_err_as_bug, + debugging_options.ui_testing); let mut sess = session::build_session_( sessopts, cpath, diagnostic_handler, source_map, @@ -621,7 +618,7 @@ pub fn run_core(search_paths: SearchPaths, ctxt.sess().abort_if_errors(); - (krate, ctxt.renderinfo.into_inner(), passes) + (krate, ctxt.renderinfo.into_inner(), render_options, passes) }), &sess) }) } diff --git a/src/librustdoc/externalfiles.rs b/src/librustdoc/externalfiles.rs index 9631ea059c..c7a2dd6da3 100644 --- a/src/librustdoc/externalfiles.rs +++ b/src/librustdoc/externalfiles.rs @@ -16,7 +16,7 @@ use syntax::feature_gate::UnstableFeatures; use html::markdown::{IdMap, ErrorCodes, Markdown}; use std::cell::RefCell; -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct ExternalHtml { /// Content that will be included inline in the section of a /// rendered Markdown file or generated documentation diff --git a/src/librustdoc/html/highlight.rs b/src/librustdoc/html/highlight.rs index 87b4527a2a..8fb91cc23f 100644 --- a/src/librustdoc/html/highlight.rs +++ b/src/librustdoc/html/highlight.rs @@ -28,9 +28,12 @@ use syntax::parse; use syntax_pos::{Span, FileName}; /// Highlights `src`, returning the HTML output. 
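
Editorial note (not part of the patch): further down in this file, `write_header` and `write_footer` change so that each highlighted block gains an outer wrapper `<div>`, which the new line-numbers setting can hook into. A standalone sketch of the resulting markup, assuming the reconstructed strings below are accurate; the `wrap_example` helper is invented:

```
use std::io::{self, Write};

fn wrap_example(out: &mut dyn Write, class: &str, body: &str) -> io::Result<()> {
    // Outer wrapper div around the <pre>, so extra UI (e.g. line numbers)
    // can be attached next to the highlighted code.
    write!(out, "<div class=\"example-wrap\"><pre class=\"rust {}\">\n", class)?;
    write!(out, "{}", body)?;
    write!(out, "</pre></div>\n")
}

fn main() -> io::Result<()> {
    let mut html = Vec::new();
    wrap_example(&mut html, "rust-example-rendered", "fn main() {}")?;
    io::stdout().write_all(&html)
}
```
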
-pub fn render_with_highlighting(src: &str, class: Option<&str>, - extension: Option<&str>, - tooltip: Option<(&str, &str)>) -> String { +pub fn render_with_highlighting( + src: &str, + class: Option<&str>, + extension: Option<&str>, + tooltip: Option<(&str, &str)>, +) -> String { debug!("highlighting: ================\n{}\n==============", src); let sess = parse::ParseSess::new(FilePathMapping::empty()); let fm = sess.source_map().new_source_file(FileName::Custom("stdin".to_string()), @@ -343,7 +346,7 @@ impl<'a> Classifier<'a> { token::Lifetime(..) => Class::Lifetime, token::Eof | token::Interpolated(..) | - token::Tilde | token::At | token::DotEq | token::SingleQuote => Class::None, + token::Tilde | token::At| token::SingleQuote => Class::None, }; // Anything that didn't return above is the simple case where we the @@ -384,9 +387,9 @@ impl Class { } fn write_header(class: Option<&str>, out: &mut dyn Write) -> io::Result<()> { - write!(out, "

\n", class.unwrap_or(""))
+    write!(out, "
\n", class.unwrap_or(""))
 }
 
 fn write_footer(out: &mut dyn Write) -> io::Result<()> {
-    write!(out, "
\n") + write!(out, "
\n") } diff --git a/src/librustdoc/html/layout.rs b/src/librustdoc/html/layout.rs index 6868c7707a..5263cfe7b0 100644 --- a/src/librustdoc/html/layout.rs +++ b/src/librustdoc/html/layout.rs @@ -33,7 +33,7 @@ pub struct Page<'a> { pub fn render( dst: &mut dyn io::Write, layout: &Layout, page: &Page, sidebar: &S, t: &T, - css_file_extension: bool, themes: &[PathBuf]) + css_file_extension: bool, themes: &[PathBuf], extra_scripts: &[&str]) -> io::Result<()> { write!(dst, @@ -149,6 +149,7 @@ pub fn render( \ \ \ + {extra_scripts}\ \ \ ", @@ -192,6 +193,11 @@ pub fn render( page.resource_suffix)) .collect::(), suffix=page.resource_suffix, + extra_scripts=extra_scripts.iter().map(|e| { + format!("", + root_path=page.root_path, + extra_script=e) + }).collect::(), ) } diff --git a/src/librustdoc/html/markdown.rs b/src/librustdoc/html/markdown.rs index 22fa887c35..00ca4fed2f 100644 --- a/src/librustdoc/html/markdown.rs +++ b/src/librustdoc/html/markdown.rs @@ -399,7 +399,6 @@ impl<'a, I: Iterator>> SummaryLine<'a, I> { fn check_if_allowed_tag(t: &Tag) -> bool { match *t { Tag::Paragraph - | Tag::CodeBlock(_) | Tag::Item | Tag::Emphasis | Tag::Strong @@ -420,29 +419,36 @@ impl<'a, I: Iterator>> Iterator for SummaryLine<'a, I> { if !self.started { self.started = true; } - let event = self.inner.next(); - let mut is_start = true; - let is_allowed_tag = match event { - Some(Event::Start(ref c)) => { - self.depth += 1; - check_if_allowed_tag(c) - } - Some(Event::End(ref c)) => { - self.depth -= 1; - is_start = false; - check_if_allowed_tag(c) - } - _ => true, - }; - if is_allowed_tag == false { - if is_start { - Some(Event::Start(Tag::Paragraph)) + while let Some(event) = self.inner.next() { + let mut is_start = true; + let is_allowed_tag = match event { + Event::Start(Tag::CodeBlock(_)) | Event::End(Tag::CodeBlock(_)) => { + return None; + } + Event::Start(ref c) => { + self.depth += 1; + check_if_allowed_tag(c) + } + Event::End(ref c) => { + self.depth -= 1; + is_start = false; + check_if_allowed_tag(c) + } + _ => { + true + } + }; + return if is_allowed_tag == false { + if is_start { + Some(Event::Start(Tag::Paragraph)) + } else { + Some(Event::End(Tag::Paragraph)) + } } else { - Some(Event::End(Tag::Paragraph)) - } - } else { - event + Some(event) + }; } + None } } @@ -905,7 +911,7 @@ pub fn markdown_links(md: &str) -> Vec<(String, Option>)> { links } -#[derive(Default)] +#[derive(Clone, Default, Debug)] pub struct IdMap { map: FxHashMap, } diff --git a/src/librustdoc/html/render.rs b/src/librustdoc/html/render.rs index 9dfe77338a..2c4ddf38e9 100644 --- a/src/librustdoc/html/render.rs +++ b/src/librustdoc/html/render.rs @@ -52,8 +52,7 @@ use std::str; use std::sync::Arc; use std::rc::Rc; -use externalfiles::ExternalHtml; - +use errors; use serialize::json::{ToJson, Json, as_json}; use syntax::ast; use syntax::ext::base::MacroKind; @@ -67,6 +66,7 @@ use rustc::util::nodemap::{FxHashMap, FxHashSet}; use rustc_data_structures::flock; use clean::{self, AttributesExt, GetDefId, SelfTy, Mutability}; +use config::RenderOptions; use doctree; use fold::DocFolder; use html::escape::Escape; @@ -76,7 +76,7 @@ use html::format::{VisSpace, Method, UnsafetySpace, MutableSpace}; use html::format::fmt_impl_for_trait_page; use html::item_type::ItemType; use html::markdown::{self, Markdown, MarkdownHtml, MarkdownSummaryLine, ErrorCodes, IdMap}; -use html::{highlight, layout}; +use html::{highlight, layout, static_files}; use minifier; @@ -490,18 +490,25 @@ pub fn initial_ids() -> Vec { /// Generates the documentation 
for `crate` into the directory `dst` pub fn run(mut krate: clean::Crate, - extern_urls: BTreeMap, - external_html: &ExternalHtml, - playground_url: Option, - dst: PathBuf, - resource_suffix: String, + options: RenderOptions, passes: FxHashSet, - css_file_extension: Option, renderinfo: RenderInfo, - sort_modules_alphabetically: bool, - themes: Vec, - enable_minification: bool, - id_map: IdMap) -> Result<(), Error> { + diag: &errors::Handler) -> Result<(), Error> { + // need to save a copy of the options for rendering the index page + let md_opts = options.clone(); + let RenderOptions { + output, + external_html, + id_map, + playground_url, + sort_modules_alphabetically, + themes, + extension_css, + extern_html_root_urls, + resource_suffix, + .. + } = options; + let src_root = match krate.src { FileName::Real(ref p) => match p.parent() { Some(p) => p.to_path_buf(), @@ -518,10 +525,10 @@ pub fn run(mut krate: clean::Crate, layout: layout::Layout { logo: String::new(), favicon: String::new(), - external_html: external_html.clone(), + external_html, krate: krate.name.clone(), }, - css_file_extension, + css_file_extension: extension_css, created_dirs: Default::default(), sort_modules_alphabetically, themes, @@ -563,6 +570,7 @@ pub fn run(mut krate: clean::Crate, } } } + let dst = output; try_err!(fs::create_dir_all(&dst), &dst); krate = render_sources(&dst, &mut scx, krate)?; let cx = Context { @@ -625,7 +633,7 @@ pub fn run(mut krate: clean::Crate, }, _ => PathBuf::new(), }; - let extern_url = extern_urls.get(&e.name).map(|u| &**u); + let extern_url = extern_html_root_urls.get(&e.name).map(|u| &**u); cache.extern_locations.insert(n, (e.name.clone(), src_root, extern_location(e, extern_url, &cx.dst))); @@ -666,7 +674,7 @@ pub fn run(mut krate: clean::Crate, CACHE_KEY.with(|v| *v.borrow_mut() = cache.clone()); CURRENT_LOCATION_KEY.with(|s| s.borrow_mut().clear()); - write_shared(&cx, &krate, &*cache, index, enable_minification)?; + write_shared(&cx, &krate, &*cache, index, &md_opts, diag)?; // And finally render the whole crate's documentation cx.krate(krate) @@ -742,11 +750,14 @@ fn build_index(krate: &clean::Crate, cache: &mut Cache) -> String { Json::Object(crate_data)) } -fn write_shared(cx: &Context, - krate: &clean::Crate, - cache: &Cache, - search_index: String, - enable_minification: bool) -> Result<(), Error> { +fn write_shared( + cx: &Context, + krate: &clean::Crate, + cache: &Cache, + search_index: String, + options: &RenderOptions, + diag: &errors::Handler, +) -> Result<(), Error> { // Write out the shared files. Note that these are shared among all rustdoc // docs placed in the output directory, so this needs to be a synchronized // operation with respect to all other rustdocs running around. @@ -756,11 +767,11 @@ fn write_shared(cx: &Context, // overwrite them anyway to make sure that they're fresh and up-to-date. write_minify(cx.dst.join(&format!("rustdoc{}.css", cx.shared.resource_suffix)), - include_str!("static/rustdoc.css"), - enable_minification)?; + static_files::RUSTDOC_CSS, + options.enable_minification)?; write_minify(cx.dst.join(&format!("settings{}.css", cx.shared.resource_suffix)), - include_str!("static/settings.css"), - enable_minification)?; + static_files::SETTINGS_CSS, + options.enable_minification)?; // To avoid "light.css" to be overwritten, we'll first run over the received themes and only // then we'll run over the "official" styles. 
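
Editorial note (not part of the patch): the new `run` and `run_core` signatures replace long positional parameter lists with a single options struct that the callee destructures. A minimal sketch of that pattern; the struct and field names here are stand-ins, not the real `RenderOptions`:

```
use std::path::PathBuf;

#[derive(Clone, Debug, Default)]
struct RenderOptionsSketch {
    output: PathBuf,
    resource_suffix: String,
    enable_minification: bool,
}

fn run_sketch(options: RenderOptionsSketch) {
    // Pull out only the fields this function needs; `..` ignores the rest,
    // so adding a new flag only touches the struct and its direct users.
    let RenderOptionsSketch { output, enable_minification, .. } = options;
    println!("writing docs to {:?} (minify: {})", output, enable_minification);
}

fn main() {
    run_sketch(RenderOptionsSketch {
        output: PathBuf::from("doc"),
        enable_minification: true,
        ..Default::default()
    });
}
```
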
@@ -779,16 +790,16 @@ fn write_shared(cx: &Context, } write(cx.dst.join(&format!("brush{}.svg", cx.shared.resource_suffix)), - include_bytes!("static/brush.svg"))?; + static_files::BRUSH_SVG)?; write(cx.dst.join(&format!("wheel{}.svg", cx.shared.resource_suffix)), - include_bytes!("static/wheel.svg"))?; + static_files::WHEEL_SVG)?; write_minify(cx.dst.join(&format!("light{}.css", cx.shared.resource_suffix)), - include_str!("static/themes/light.css"), - enable_minification)?; + static_files::themes::LIGHT, + options.enable_minification)?; themes.insert("light".to_owned()); write_minify(cx.dst.join(&format!("dark{}.css", cx.shared.resource_suffix)), - include_str!("static/themes/dark.css"), - enable_minification)?; + static_files::themes::DARK, + options.enable_minification)?; themes.insert("dark".to_owned()); let mut themes: Vec<&String> = themes.iter().collect(); @@ -843,67 +854,73 @@ themePicker.onblur = handleThemeButtonsBlur; )?; write_minify(cx.dst.join(&format!("main{}.js", cx.shared.resource_suffix)), - include_str!("static/main.js"), - enable_minification)?; + static_files::MAIN_JS, + options.enable_minification)?; write_minify(cx.dst.join(&format!("settings{}.js", cx.shared.resource_suffix)), - include_str!("static/settings.js"), - enable_minification)?; + static_files::SETTINGS_JS, + options.enable_minification)?; + if cx.shared.include_sources { + write_minify(cx.dst.join(&format!("source-script{}.js", cx.shared.resource_suffix)), + static_files::sidebar::SOURCE_SCRIPT, + options.enable_minification)?; + } { let mut data = format!("var resourcesSuffix = \"{}\";\n", cx.shared.resource_suffix); - data.push_str(include_str!("static/storage.js")); + data.push_str(static_files::STORAGE_JS); write_minify(cx.dst.join(&format!("storage{}.js", cx.shared.resource_suffix)), &data, - enable_minification)?; + options.enable_minification)?; } if let Some(ref css) = cx.shared.css_file_extension { let out = cx.dst.join(&format!("theme{}.css", cx.shared.resource_suffix)); - if !enable_minification { + if !options.enable_minification { try_err!(fs::copy(css, out), css); } else { let mut f = try_err!(File::open(css), css); let mut buffer = String::with_capacity(1000); try_err!(f.read_to_string(&mut buffer), css); - write_minify(out, &buffer, enable_minification)?; + write_minify(out, &buffer, options.enable_minification)?; } } write_minify(cx.dst.join(&format!("normalize{}.css", cx.shared.resource_suffix)), - include_str!("static/normalize.css"), - enable_minification)?; + static_files::NORMALIZE_CSS, + options.enable_minification)?; write(cx.dst.join("FiraSans-Regular.woff"), - include_bytes!("static/FiraSans-Regular.woff"))?; + static_files::fira_sans::REGULAR)?; write(cx.dst.join("FiraSans-Medium.woff"), - include_bytes!("static/FiraSans-Medium.woff"))?; + static_files::fira_sans::MEDIUM)?; write(cx.dst.join("FiraSans-LICENSE.txt"), - include_bytes!("static/FiraSans-LICENSE.txt"))?; + static_files::fira_sans::LICENSE)?; write(cx.dst.join("Heuristica-Italic.woff"), - include_bytes!("static/Heuristica-Italic.woff"))?; + static_files::heuristica::ITALIC)?; write(cx.dst.join("Heuristica-LICENSE.txt"), - include_bytes!("static/Heuristica-LICENSE.txt"))?; + static_files::heuristica::LICENSE)?; write(cx.dst.join("SourceSerifPro-Regular.woff"), - include_bytes!("static/SourceSerifPro-Regular.woff"))?; + static_files::source_serif_pro::REGULAR)?; write(cx.dst.join("SourceSerifPro-Bold.woff"), - include_bytes!("static/SourceSerifPro-Bold.woff"))?; + static_files::source_serif_pro::BOLD)?; 
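
Editorial note (not part of the patch): `write_shared` now pulls embedded assets from named constants in a `static_files` module rather than repeating `include_str!`/`include_bytes!` at every call site. A sketch of what such a module can look like; the real constants are built with `include_str!`, plain literals stand in here so the snippet compiles on its own:

```
mod static_files {
    // Stand-ins for the embedded contents of the files under html/static/.
    pub static RUSTDOC_CSS: &str = "/* contents of static/rustdoc.css */";
    pub static MAIN_JS: &str = "/* contents of static/main.js */";

    pub mod themes {
        pub static LIGHT: &str = "/* contents of static/themes/light.css */";
        pub static DARK: &str = "/* contents of static/themes/dark.css */";
    }
}

fn main() {
    // Callers refer to the named constant instead of repeating the file path.
    println!("{} bytes of main.js", static_files::MAIN_JS.len());
}
```
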
write(cx.dst.join("SourceSerifPro-LICENSE.txt"), - include_bytes!("static/SourceSerifPro-LICENSE.txt"))?; + static_files::source_serif_pro::LICENSE)?; write(cx.dst.join("SourceCodePro-Regular.woff"), - include_bytes!("static/SourceCodePro-Regular.woff"))?; + static_files::source_code_pro::REGULAR)?; write(cx.dst.join("SourceCodePro-Semibold.woff"), - include_bytes!("static/SourceCodePro-Semibold.woff"))?; + static_files::source_code_pro::SEMIBOLD)?; write(cx.dst.join("SourceCodePro-LICENSE.txt"), - include_bytes!("static/SourceCodePro-LICENSE.txt"))?; + static_files::source_code_pro::LICENSE)?; write(cx.dst.join("LICENSE-MIT.txt"), - include_bytes!("static/LICENSE-MIT.txt"))?; + static_files::LICENSE_MIT)?; write(cx.dst.join("LICENSE-APACHE.txt"), - include_bytes!("static/LICENSE-APACHE.txt"))?; + static_files::LICENSE_APACHE)?; write(cx.dst.join("COPYRIGHT.txt"), - include_bytes!("static/COPYRIGHT.txt"))?; + static_files::COPYRIGHT)?; - fn collect(path: &Path, krate: &str, key: &str) -> io::Result> { + fn collect(path: &Path, krate: &str, key: &str) -> io::Result<(Vec, Vec)> { let mut ret = Vec::new(); + let mut krates = Vec::new(); if path.exists() { for line in BufReader::new(File::open(path)?).lines() { let line = line?; @@ -914,9 +931,13 @@ themePicker.onblur = handleThemeButtonsBlur; continue; } ret.push(line.to_string()); + krates.push(line[key.len() + 2..].split('"') + .next() + .map(|s| s.to_owned()) + .unwrap_or_else(|| String::new())); } } - Ok(ret) + Ok((ret, krates)) } fn show_item(item: &IndexItem, krate: &str) -> String { @@ -931,7 +952,7 @@ themePicker.onblur = handleThemeButtonsBlur; let dst = cx.dst.join("aliases.js"); { - let mut all_aliases = try_err!(collect(&dst, &krate.name, "ALIASES"), &dst); + let (mut all_aliases, _) = try_err!(collect(&dst, &krate.name, "ALIASES"), &dst); let mut w = try_err!(File::create(&dst), &dst); let mut output = String::with_capacity(100); for (alias, items) in &cache.aliases { @@ -953,22 +974,140 @@ themePicker.onblur = handleThemeButtonsBlur; } } + use std::ffi::OsString; + + #[derive(Debug)] + struct Hierarchy { + elem: OsString, + children: FxHashMap, + elems: FxHashSet, + } + + impl Hierarchy { + fn new(elem: OsString) -> Hierarchy { + Hierarchy { + elem, + children: FxHashMap::default(), + elems: FxHashSet::default(), + } + } + + fn to_json_string(&self) -> String { + let mut subs: Vec<&Hierarchy> = self.children.values().collect(); + subs.sort_unstable_by(|a, b| a.elem.cmp(&b.elem)); + let mut files = self.elems.iter() + .map(|s| format!("\"{}\"", + s.to_str() + .expect("invalid osstring conversion"))) + .collect::>(); + files.sort_unstable_by(|a, b| a.cmp(b)); + // FIXME(imperio): we could avoid to generate "dirs" and "files" if they're empty. 
+ format!("{{\"name\":\"{name}\",\"dirs\":[{subs}],\"files\":[{files}]}}", + name=self.elem.to_str().expect("invalid osstring conversion"), + subs=subs.iter().map(|s| s.to_json_string()).collect::>().join(","), + files=files.join(",")) + } + } + + if cx.shared.include_sources { + use std::path::Component; + + let mut hierarchy = Hierarchy::new(OsString::new()); + for source in cx.shared.local_sources.iter() + .filter_map(|p| p.0.strip_prefix(&cx.shared.src_root) + .ok()) { + let mut h = &mut hierarchy; + let mut elems = source.components() + .filter_map(|s| { + match s { + Component::Normal(s) => Some(s.to_owned()), + _ => None, + } + }) + .peekable(); + loop { + let cur_elem = elems.next().expect("empty file path"); + if elems.peek().is_none() { + h.elems.insert(cur_elem); + break; + } else { + let e = cur_elem.clone(); + h.children.entry(cur_elem.clone()).or_insert_with(|| Hierarchy::new(e)); + h = h.children.get_mut(&cur_elem).expect("not found child"); + } + } + } + + let dst = cx.dst.join("source-files.js"); + let (mut all_sources, _krates) = try_err!(collect(&dst, &krate.name, "sourcesIndex"), &dst); + all_sources.push(format!("sourcesIndex['{}'] = {};", + &krate.name, + hierarchy.to_json_string())); + all_sources.sort(); + let mut w = try_err!(File::create(&dst), &dst); + try_err!(writeln!(&mut w, + "var N = null;var sourcesIndex = {{}};\n{}", + all_sources.join("\n")), + &dst); + } + // Update the search index let dst = cx.dst.join("search-index.js"); - let mut all_indexes = try_err!(collect(&dst, &krate.name, "searchIndex"), &dst); + let (mut all_indexes, mut krates) = try_err!(collect(&dst, &krate.name, "searchIndex"), &dst); all_indexes.push(search_index); + // Sort the indexes by crate so the file will be generated identically even // with rustdoc running in parallel. all_indexes.sort(); let mut w = try_err!(File::create(&dst), &dst); try_err!(writeln!(&mut w, "var N = null;var searchIndex = {{}};"), &dst); for index in &all_indexes { - try_err!(write_minify_replacer(&mut w, &*index, enable_minification, + try_err!(write_minify_replacer(&mut w, &*index, options.enable_minification, &[(minifier::js::Keyword::Null, "N")]), &dst); } try_err!(writeln!(&mut w, "initSearch(searchIndex);"), &dst); + if options.enable_index_page { + if let Some(index_page) = options.index_page.clone() { + let mut md_opts = options.clone(); + md_opts.output = cx.dst.clone(); + md_opts.external_html = (*cx.shared).layout.external_html.clone(); + + ::markdown::render(index_page, md_opts, diag); + } else { + let dst = cx.dst.join("index.html"); + let mut w = BufWriter::new(try_err!(File::create(&dst), &dst)); + let page = layout::Page { + title: "Index of crates", + css_class: "mod", + root_path: "./", + description: "List of crates", + keywords: BASIC_KEYWORDS, + resource_suffix: &cx.shared.resource_suffix, + }; + krates.push(krate.name.clone()); + krates.sort(); + krates.dedup(); + + let content = format!( +"

<h1 class='fqn'>\
+     <span class='in-band'>List of all crates</span>\
+</h1><ul class='mod'>{}</ul>
", + krates + .iter() + .map(|s| { + format!("
", s, s) + }) + .collect::()); + try_err!(layout::render(&mut w, &cx.shared.layout, + &page, &(""), &content, + cx.shared.css_file_extension.is_some(), + &cx.shared.themes, &[]), &dst); + try_err!(w.flush(), &dst); + } + } + // Update the list of all implementors for traits let dst = cx.dst.join("implementors"); for (&did, imps) in &cache.implementors { @@ -1022,7 +1161,8 @@ themePicker.onblur = handleThemeButtonsBlur; remote_item_type.css_class(), remote_path[remote_path.len() - 1])); - let mut all_implementors = try_err!(collect(&mydst, &krate.name, "implementors"), &mydst); + let (mut all_implementors, _) = try_err!(collect(&mydst, &krate.name, "implementors"), + &mydst); all_implementors.push(implementors); // Sort the implementors by crate so the file will be generated // identically even with rustdoc running in parallel. @@ -1235,7 +1375,8 @@ impl<'a> SourceCollector<'a> { layout::render(&mut w, &self.scx.layout, &page, &(""), &Source(contents), self.scx.css_file_extension.is_some(), - &self.scx.themes)?; + &self.scx.themes, &["source-files", + &format!("source-script{}", page.resource_suffix)])?; w.flush()?; self.scx.local_sources.insert(p.clone(), href); Ok(()) @@ -1709,6 +1850,7 @@ impl<'a> Settings<'a> { ("method-docs", "Auto-hide item methods' documentation", false), ("go-to-only-result", "Directly go to item in search if there is only one result", false), + ("line-numbers", "Show line numbers on code examples", false), ], root_path, suffix, @@ -1832,7 +1974,7 @@ impl Context { try_err!(layout::render(&mut w, &self.shared.layout, &page, &sidebar, &all, self.shared.css_file_extension.is_some(), - &self.shared.themes), + &self.shared.themes, &[]), &final_file); // Generating settings page. @@ -1852,7 +1994,7 @@ impl Context { try_err!(layout::render(&mut w, &layout, &page, &sidebar, &settings, self.shared.css_file_extension.is_some(), - &themes), + &themes, &[]), &settings_file); Ok(()) @@ -1910,7 +2052,7 @@ impl Context { &Sidebar{ cx: self, item: it }, &Item{ cx: self, item: it }, self.shared.css_file_extension.is_some(), - &self.shared.themes)?; + &self.shared.themes, &[])?; } else { let mut url = self.root_path(); if let Some(&(ref names, ty)) = cache().paths.get(&it.def_id) { @@ -2259,8 +2401,8 @@ fn document(w: &mut fmt::Formatter, cx: &Context, item: &clean::Item) -> fmt::Re if let Some(ref name) = item.name { info!("Documenting {}", name); } - document_stability(w, cx, item)?; - document_full(w, item, cx, "")?; + document_stability(w, cx, item, false)?; + document_full(w, item, cx, "", false)?; Ok(()) } @@ -2269,15 +2411,19 @@ fn render_markdown(w: &mut fmt::Formatter, cx: &Context, md_text: &str, links: Vec<(String, String)>, - prefix: &str) + prefix: &str, + is_hidden: bool) -> fmt::Result { let mut ids = cx.id_map.borrow_mut(); - write!(w, "
<div class='docblock'>{}{}</div>
", - prefix, Markdown(md_text, &links, RefCell::new(&mut ids), cx.codes)) + write!(w, "
<div class='docblock{}'>{}{}</div>
", + if is_hidden { " hidden" } else { "" }, + prefix, + Markdown(md_text, &links, RefCell::new(&mut ids), + cx.codes)) } fn document_short(w: &mut fmt::Formatter, cx: &Context, item: &clean::Item, link: AssocItemLink, - prefix: &str) -> fmt::Result { + prefix: &str, is_hidden: bool) -> fmt::Result { if let Some(s) = item.doc_value() { let markdown = if s.contains('\n') { format!("{} [Read more]({})", @@ -2285,28 +2431,33 @@ fn document_short(w: &mut fmt::Formatter, cx: &Context, item: &clean::Item, link } else { plain_summary_line(Some(s)) }; - render_markdown(w, cx, &markdown, item.links(), prefix)?; + render_markdown(w, cx, &markdown, item.links(), prefix, is_hidden)?; } else if !prefix.is_empty() { - write!(w, "
<div class='docblock'>{}</div>
", prefix)?; + write!(w, "
<div class='docblock{}'>{}</div>
", + if is_hidden { " hidden" } else { "" }, + prefix)?; } Ok(()) } fn document_full(w: &mut fmt::Formatter, item: &clean::Item, - cx: &Context, prefix: &str) -> fmt::Result { + cx: &Context, prefix: &str, is_hidden: bool) -> fmt::Result { if let Some(s) = cx.shared.maybe_collapsed_doc_value(item) { debug!("Doc block: =====\n{}\n=====", s); - render_markdown(w, cx, &*s, item.links(), prefix)?; + render_markdown(w, cx, &*s, item.links(), prefix, is_hidden)?; } else if !prefix.is_empty() { - write!(w, "
<div class='docblock'>{}</div>
", prefix)?; + write!(w, "
<div class='docblock{}'>{}</div>
", + if is_hidden { " hidden" } else { "" }, + prefix)?; } Ok(()) } -fn document_stability(w: &mut fmt::Formatter, cx: &Context, item: &clean::Item) -> fmt::Result { +fn document_stability(w: &mut fmt::Formatter, cx: &Context, item: &clean::Item, + is_hidden: bool) -> fmt::Result { let stabilities = short_stability(item, cx, true); if !stabilities.is_empty() { - write!(w, "
")?; + write!(w, "
", if is_hidden { " hidden" } else { "" })?; for stability in stabilities { write!(w, "{}", stability)?; } @@ -2517,24 +2668,39 @@ fn item_module(w: &mut fmt::Formatter, cx: &Context, _ => "", }; + let stab = myitem.stability_class(); + let add = if stab.is_some() { + " " + } else { + "" + }; + let doc_value = myitem.doc_value().unwrap_or(""); - write!(w, " - - {name}{unsafety_flag} - - {stab_docs} {docs} - + write!(w, "\ + \ + {name}{unsafety_flag}\ + {stab_docs}{docs}\ + \ ", name = *myitem.name.as_ref().unwrap(), stab_docs = stab_docs, docs = MarkdownSummaryLine(doc_value, &myitem.links()), class = myitem.type_(), - stab = myitem.stability_class().unwrap_or(String::new()), + add = add, + stab = stab.unwrap_or_else(|| String::new()), unsafety_flag = unsafety_flag, href = item_path(myitem.type_(), myitem.name.as_ref().unwrap()), - title_type = myitem.type_(), - title = full_path(cx, myitem))?; + title = [full_path(cx, myitem), myitem.type_().to_string()] + .iter() + .filter_map(|s| if !s.is_empty() { + Some(s.as_str()) + } else { + None + }) + .collect::>() + .join(" "), + )?; } } } @@ -3378,10 +3544,10 @@ fn item_enum(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item, let variant_id = cx.derive_id(format!("{}.{}.fields", ItemType::Variant, variant.name.as_ref().unwrap())); - write!(w, "", + write!(w, "", id = variant_id)?; - write!(w, "

Fields of {name}

\n - ", name = variant.name.as_ref().unwrap())?; + write!(w, "

Fields of {name}

", + name = variant.name.as_ref().unwrap())?; for field in &s.fields { use clean::StructFieldItem; if let StructFieldItem(ref ty) = field.inner { @@ -3393,19 +3559,18 @@ fn item_enum(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item, ItemType::Variant.name_space(), field.name.as_ref().unwrap(), ItemType::StructField.name_space())); - write!(w, "
")?; } } - write!(w, "
\ - ", + write!(w, "\ + \ + ", id = id, ns_id = ns_id, f = field.name.as_ref().unwrap(), t = *ty)?; document(w, cx, field)?; - write!(w, "
")?; + write!(w, "
")?; } render_stability_since(w, variant, it)?; } @@ -3872,14 +4037,21 @@ fn render_impl(w: &mut fmt::Formatter, cx: &Context, i: &Impl, link: AssocItemLi RenderMode::ForDeref { mut_: deref_mut_ } => should_render_item(&item, deref_mut_), }; + let (is_hidden, extra_class) = if trait_.is_none() || + item.doc_value().is_some() || + item.inner.is_associated() { + (false, "") + } else { + (true, " hidden") + }; match item.inner { clean::MethodItem(clean::Method { ref decl, .. }) | - clean::TyMethodItem(clean::TyMethod{ ref decl, .. }) => { + clean::TyMethodItem(clean::TyMethod { ref decl, .. }) => { // Only render when the method is not static or we allow static methods if render_method_item { let id = cx.derive_id(format!("{}.{}", item_type, name)); let ns_id = cx.derive_id(format!("{}.{}", name, item_type.name_space())); - write!(w, "

", id, item_type)?; + write!(w, "

", id, item_type, extra_class)?; write!(w, "{}", spotlight_decl(decl)?)?; write!(w, "

  • {}