New upstream version 1.47.0~beta.2+dfsg1

Ximin Luo 2020-09-04 16:38:49 +01:00
parent f96527815a
commit 3dfed10e78
6382 changed files with 551018 additions and 675592 deletions


@ -4,5 +4,12 @@ Thank you for your interest in contributing to Rust!
To get started, read the [Getting Started] guide in the [rustc-dev-guide].
## Bug reports
Did a compiler error message tell you to come here? If you want to create an ICE report,
refer to [this section][contributing-bug-reports] and [open an issue][issue template].
[Getting Started]: https://rustc-dev-guide.rust-lang.org/getting-started.html
[rustc-dev-guide]: https://rustc-dev-guide.rust-lang.org/
[contributing-bug-reports]: https://rustc-dev-guide.rust-lang.org/contributing.html#bug-reports
[issue template]: https://github.com/rust-lang/rust/issues/new/choose

Cargo.lock (generated, 1908 lines changed): diff suppressed because it is too large.


@ -2,9 +2,9 @@
members = [
"src/bootstrap",
"src/rustc",
"src/libstd",
"src/libtest",
"src/librustc_codegen_llvm",
"library/std",
"library/test",
"src/tools/cargotest",
"src/tools/clippy",
"src/tools/compiletest",
@ -13,10 +13,12 @@ members = [
"src/tools/rustbook",
"src/tools/unstable-book-gen",
"src/tools/tidy",
"src/tools/tier-check",
"src/tools/build-manifest",
"src/tools/remote-test-client",
"src/tools/remote-test-server",
"src/tools/rust-installer",
"src/tools/rust-demangler",
"src/tools/cargo",
"src/tools/rustdoc",
"src/tools/rls",
@ -55,6 +57,18 @@ overflow-checks = false
# per-crate configuration isn't specifiable in the environment.
codegen-units = 10000
# These dependencies of the standard library implement symbolication for
# backtraces on most platforms. Their debuginfo causes both linking to be slower
# (more data to chew through) and binaries to be larger without really all that
# much benefit. This section turns them all down to no debuginfo, which
# helps to improve link times a little bit.
[profile.release.package]
addr2line.debug = 0
adler.debug = 0
gimli.debug = 0
miniz_oxide.debug = 0
object.debug = 0
# We want the RLS to use the version of Cargo that we've got vendored in this
# repository to ensure that the same exact version of Cargo is used by both the
# RLS and the Cargo binary itself. The RLS depends on Cargo as a git repository
@ -63,21 +77,28 @@ codegen-units = 10000
[patch."https://github.com/rust-lang/cargo"]
cargo = { path = "src/tools/cargo" }
[patch."https://github.com/rust-lang/rustfmt"]
# Similar to Cargo above we want the RLS to use a vendored version of `rustfmt`
# that we're shipping as well (to ensure that the rustfmt in RLS and the
# `rustfmt` executable are the same exact version).
rustfmt-nightly = { path = "src/tools/rustfmt" }
[patch.crates-io]
# See comments in `src/tools/rustc-workspace-hack/README.md` for what's going on
# here
rustc-workspace-hack = { path = 'src/tools/rustc-workspace-hack' }
# See comments in `tools/rustc-std-workspace-core/README.md` for what's going on
# See comments in `library/rustc-std-workspace-core/README.md` for what's going on
# here
rustc-std-workspace-core = { path = 'src/tools/rustc-std-workspace-core' }
rustc-std-workspace-alloc = { path = 'src/tools/rustc-std-workspace-alloc' }
rustc-std-workspace-std = { path = 'src/tools/rustc-std-workspace-std' }
rustc-std-workspace-core = { path = 'library/rustc-std-workspace-core' }
rustc-std-workspace-alloc = { path = 'library/rustc-std-workspace-alloc' }
rustc-std-workspace-std = { path = 'library/rustc-std-workspace-std' }
# This crate's integration with libstd is a bit wonky, so we use a submodule
# instead of a crates.io dependency. Make sure everything else in the repo is
# also using the submodule, however, so we can avoid duplicate copies of the
# source code for this crate.
backtrace = { path = "library/backtrace" }
[patch."https://github.com/rust-lang/rust-clippy"]
clippy_lints = { path = "src/tools/clippy/clippy_lints" }


@ -1,9 +1,9 @@
<a href = "https://www.rust-lang.org/">
<img width = "90%" height = "auto" src = "https://img.shields.io/badge/Rust-Programming%20Language-black?style=flat&logo=rust" alt = "The Rust Programming Language">
</a>
This is the main source code repository for [Rust]. It contains the compiler,
standard library, and documentation.
[Rust]: https://www.rust-lang.org
@ -23,7 +23,7 @@ Read ["Installation"] from [The Book].
section.**
The Rust build system uses a Python script called `x.py` to build the compiler,
which manages the bootstrapping process. More information about it can be found
by running `./x.py --help` or reading the [rustc dev guide][rustcguidebuild].
[gettingstarted]: https://rustc-dev-guide.rust-lang.org/getting-started.html
@ -157,17 +157,6 @@ by manually calling the appropriate vcvars file before running the bootstrap.
> python x.py build
```
### Building rustc with older host toolchains
It is still possible to build Rust with the older toolchain versions listed below, but only if the
`LLVM_TEMPORARILY_ALLOW_OLD_TOOLCHAIN` option is set to true in the `config.toml` file.
* Clang 3.1
* Apple Clang 3.1
* GCC 4.8
* Visual Studio 2015 (Update 3)
Toolchain versions older than what is listed above cannot be used to build rustc.
#### Specifying an ABI
Each specific ABI can also be used from either environment (for example, using


@ -341,7 +341,10 @@
# Debuginfo for tests run with compiletest is not controlled by this option
# and needs to be enabled separately with `debuginfo-level-tests`.
#
# Defaults to 2 if debug is true
# Note that debuginfo-level = 2 generates several gigabytes of debuginfo
# and will slow down the linking process significantly.
#
# Defaults to 1 if debug is true
#debuginfo-level = 0
# Debuginfo level for the compiler.
@ -391,8 +394,7 @@
# desired in distributions, for example.
#rpath = true
# Emits extraneous output from tests to ensure that failures of the test
# harness are debuggable just from logfiles.
# Emits extra output from tests so test failures are debuggable just from logfiles.
#verbose-tests = false
# Flag indicating whether tests are compiled with optimizations (the -O flag).
@ -431,7 +433,7 @@
#
# LLD will not be used if we're cross linking or running tests.
#
# Explicitly setting the linker for a target will override this option.
# Explicitly setting the linker for a target will override this option when targeting MSVC.
#use-lld = false
# Indicates whether some LLVM tools, like llvm-objdump, will be made available in the
@ -454,8 +456,7 @@
# instead of LLVM's default of 100.
#thin-lto-import-instr-limit = 100
# Map all debuginfo paths for libstd and crates to `/rust/$sha/$crate/...`,
# generally only set for releases
# Map debuginfo paths to `/rust/$sha/...`, generally only set for releases
#remap-debuginfo = false
# Link the compiler against `jemalloc`, where on Linux and OSX it should
@ -474,6 +475,10 @@
# This only applies from stage 1 onwards, and only for Windows targets.
#control-flow-guard = false
# Enable symbol-mangling-version v0. This can be helpful when profiling rustc,
# as generics will be preserved in symbols (rather than erased into opaque T).
#new-symbol-mangling = false
# =============================================================================
# Options for specific targets
#
@ -502,7 +507,7 @@
# Linker to be used to link Rust code. Note that the
# default value is platform specific, and if not specified it may also depend on
# what platform is crossing to what platform.
# Setting this will override the `use-lld` option for Rust code.
# Setting this will override the `use-lld` option for Rust code when targeting MSVC.
#linker = "cc"
# Path to the `llvm-config` binary of the installation of a custom LLVM to link


@ -1 +1 @@
04488afe34512aa4c33566eb16d8c912a3ae04f9
84b047bf64dfcfa12867781e9c23dfa4f2e6082c

library/alloc/Cargo.toml (new file)

@ -0,0 +1,33 @@
[package]
authors = ["The Rust Project Developers"]
name = "alloc"
version = "0.0.0"
autotests = false
autobenches = false
edition = "2018"
[dependencies]
core = { path = "../core" }
compiler_builtins = { version = "0.1.10", features = ['rustc-dep-of-std'] }
[dev-dependencies]
rand = "0.7"
rand_xorshift = "0.2"
[[test]]
name = "collectionstests"
path = "tests/lib.rs"
[[bench]]
name = "collectionsbenches"
path = "benches/lib.rs"
test = true
[[bench]]
name = "vec_deque_append_bench"
path = "benches/vec_deque_append.rs"
harness = false
[features]
compiler-builtins-mem = ['compiler_builtins/mem']
compiler-builtins-c = ["compiler_builtins/c"]


@ -0,0 +1,586 @@
use std::collections::BTreeMap;
use std::iter::Iterator;
use std::ops::RangeBounds;
use std::vec::Vec;
use rand::{seq::SliceRandom, thread_rng, Rng};
use test::{black_box, Bencher};
macro_rules! map_insert_rand_bench {
($name: ident, $n: expr, $map: ident) => {
#[bench]
pub fn $name(b: &mut Bencher) {
let n: usize = $n;
let mut map = $map::new();
// setup
let mut rng = thread_rng();
for _ in 0..n {
let i = rng.gen::<usize>() % n;
map.insert(i, i);
}
// measure
b.iter(|| {
let k = rng.gen::<usize>() % n;
map.insert(k, k);
map.remove(&k);
});
black_box(map);
}
};
}
macro_rules! map_insert_seq_bench {
($name: ident, $n: expr, $map: ident) => {
#[bench]
pub fn $name(b: &mut Bencher) {
let mut map = $map::new();
let n: usize = $n;
// setup
for i in 0..n {
map.insert(i * 2, i * 2);
}
// measure
let mut i = 1;
b.iter(|| {
map.insert(i, i);
map.remove(&i);
i = (i + 2) % n;
});
black_box(map);
}
};
}
macro_rules! map_find_rand_bench {
($name: ident, $n: expr, $map: ident) => {
#[bench]
pub fn $name(b: &mut Bencher) {
let mut map = $map::new();
let n: usize = $n;
// setup
let mut rng = thread_rng();
let mut keys: Vec<_> = (0..n).map(|_| rng.gen::<usize>() % n).collect();
for &k in &keys {
map.insert(k, k);
}
keys.shuffle(&mut rng);
// measure
let mut i = 0;
b.iter(|| {
let t = map.get(&keys[i]);
i = (i + 1) % n;
black_box(t);
})
}
};
}
macro_rules! map_find_seq_bench {
($name: ident, $n: expr, $map: ident) => {
#[bench]
pub fn $name(b: &mut Bencher) {
let mut map = $map::new();
let n: usize = $n;
// setup
for i in 0..n {
map.insert(i, i);
}
// measure
let mut i = 0;
b.iter(|| {
let x = map.get(&i);
i = (i + 1) % n;
black_box(x);
})
}
};
}
map_insert_rand_bench! {insert_rand_100, 100, BTreeMap}
map_insert_rand_bench! {insert_rand_10_000, 10_000, BTreeMap}
map_insert_seq_bench! {insert_seq_100, 100, BTreeMap}
map_insert_seq_bench! {insert_seq_10_000, 10_000, BTreeMap}
map_find_rand_bench! {find_rand_100, 100, BTreeMap}
map_find_rand_bench! {find_rand_10_000, 10_000, BTreeMap}
map_find_seq_bench! {find_seq_100, 100, BTreeMap}
map_find_seq_bench! {find_seq_10_000, 10_000, BTreeMap}
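To make the macro mechanics concrete: each invocation above stamps out one `#[bench]` function by substituting `$name`, `$n`, and `$map`. A sketch of what the first invocation expands to (derived by hand from the macro definition):

```rust
// Approximate expansion of `map_insert_rand_bench! {insert_rand_100, 100, BTreeMap}`.
#[bench]
pub fn insert_rand_100(b: &mut Bencher) {
    let n: usize = 100;
    let mut map = BTreeMap::new();
    // setup: pre-fill the map with random keys drawn from 0..n
    let mut rng = thread_rng();
    for _ in 0..n {
        let i = rng.gen::<usize>() % n;
        map.insert(i, i);
    }
    // measure: one random insert plus one remove per iteration,
    // keeping the map size roughly constant
    b.iter(|| {
        let k = rng.gen::<usize>() % n;
        map.insert(k, k);
        map.remove(&k);
    });
    black_box(map);
}
```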
fn bench_iteration(b: &mut Bencher, size: i32) {
let mut map = BTreeMap::<i32, i32>::new();
let mut rng = thread_rng();
for _ in 0..size {
map.insert(rng.gen(), rng.gen());
}
b.iter(|| {
for entry in &map {
black_box(entry);
}
});
}
#[bench]
pub fn iteration_20(b: &mut Bencher) {
bench_iteration(b, 20);
}
#[bench]
pub fn iteration_1000(b: &mut Bencher) {
bench_iteration(b, 1000);
}
#[bench]
pub fn iteration_100000(b: &mut Bencher) {
bench_iteration(b, 100000);
}
fn bench_iteration_mut(b: &mut Bencher, size: i32) {
let mut map = BTreeMap::<i32, i32>::new();
let mut rng = thread_rng();
for _ in 0..size {
map.insert(rng.gen(), rng.gen());
}
b.iter(|| {
for kv in map.iter_mut() {
black_box(kv);
}
});
}
#[bench]
pub fn iteration_mut_20(b: &mut Bencher) {
bench_iteration_mut(b, 20);
}
#[bench]
pub fn iteration_mut_1000(b: &mut Bencher) {
bench_iteration_mut(b, 1000);
}
#[bench]
pub fn iteration_mut_100000(b: &mut Bencher) {
bench_iteration_mut(b, 100000);
}
fn bench_first_and_last(b: &mut Bencher, size: i32) {
let map: BTreeMap<_, _> = (0..size).map(|i| (i, i)).collect();
b.iter(|| {
for _ in 0..10 {
black_box(map.first_key_value());
black_box(map.last_key_value());
}
});
}
#[bench]
pub fn first_and_last_0(b: &mut Bencher) {
bench_first_and_last(b, 0);
}
#[bench]
pub fn first_and_last_100(b: &mut Bencher) {
bench_first_and_last(b, 100);
}
#[bench]
pub fn first_and_last_10k(b: &mut Bencher) {
bench_first_and_last(b, 10_000);
}
const BENCH_RANGE_SIZE: i32 = 145;
const BENCH_RANGE_COUNT: i32 = BENCH_RANGE_SIZE * (BENCH_RANGE_SIZE - 1) / 2;
fn bench_range<F, R>(b: &mut Bencher, f: F)
where
F: Fn(i32, i32) -> R,
R: RangeBounds<i32>,
{
let map: BTreeMap<_, _> = (0..BENCH_RANGE_SIZE).map(|i| (i, i)).collect();
b.iter(|| {
let mut c = 0;
for i in 0..BENCH_RANGE_SIZE {
for j in i + 1..BENCH_RANGE_SIZE {
black_box(map.range(f(i, j)));
c += 1;
}
}
debug_assert_eq!(c, BENCH_RANGE_COUNT);
});
}
#[bench]
pub fn range_included_excluded(b: &mut Bencher) {
bench_range(b, |i, j| i..j);
}
#[bench]
pub fn range_included_included(b: &mut Bencher) {
bench_range(b, |i, j| i..=j);
}
#[bench]
pub fn range_included_unbounded(b: &mut Bencher) {
bench_range(b, |i, _| i..);
}
#[bench]
pub fn range_unbounded_unbounded(b: &mut Bencher) {
bench_range(b, |_, _| ..);
}
fn bench_iter(b: &mut Bencher, repeats: i32, size: i32) {
let map: BTreeMap<_, _> = (0..size).map(|i| (i, i)).collect();
b.iter(|| {
for _ in 0..repeats {
black_box(map.iter());
}
});
}
/// Contrast `range_unbounded_unbounded` with `iter()`.
#[bench]
pub fn range_unbounded_vs_iter(b: &mut Bencher) {
bench_iter(b, BENCH_RANGE_COUNT, BENCH_RANGE_SIZE);
}
#[bench]
pub fn iter_0(b: &mut Bencher) {
bench_iter(b, 1_000, 0);
}
#[bench]
pub fn iter_1(b: &mut Bencher) {
bench_iter(b, 1_000, 1);
}
#[bench]
pub fn iter_100(b: &mut Bencher) {
bench_iter(b, 1_000, 100);
}
#[bench]
pub fn iter_10k(b: &mut Bencher) {
bench_iter(b, 1_000, 10_000);
}
#[bench]
pub fn iter_1m(b: &mut Bencher) {
bench_iter(b, 1_000, 1_000_000);
}
const FAT: usize = 256;
// The returned map has small keys and values.
// Benchmarks on it have a counterpart in set.rs with the same keys and no values at all.
fn slim_map(n: usize) -> BTreeMap<usize, usize> {
(0..n).map(|i| (i, i)).collect::<BTreeMap<_, _>>()
}
// The returned map has small keys and large values.
fn fat_val_map(n: usize) -> BTreeMap<usize, [usize; FAT]> {
(0..n).map(|i| (i, [i; FAT])).collect::<BTreeMap<_, _>>()
}
// The returned map has large keys and values.
fn fat_map(n: usize) -> BTreeMap<[usize; FAT], [usize; FAT]> {
(0..n).map(|i| ([i; FAT], [i; FAT])).collect::<BTreeMap<_, _>>()
}
#[bench]
pub fn clone_slim_100(b: &mut Bencher) {
let src = slim_map(100);
b.iter(|| src.clone())
}
#[bench]
pub fn clone_slim_100_and_clear(b: &mut Bencher) {
let src = slim_map(100);
b.iter(|| src.clone().clear())
}
#[bench]
pub fn clone_slim_100_and_drain_all(b: &mut Bencher) {
let src = slim_map(100);
b.iter(|| src.clone().drain_filter(|_, _| true).count())
}
#[bench]
pub fn clone_slim_100_and_drain_half(b: &mut Bencher) {
let src = slim_map(100);
b.iter(|| {
let mut map = src.clone();
assert_eq!(map.drain_filter(|i, _| i % 2 == 0).count(), 100 / 2);
assert_eq!(map.len(), 100 / 2);
})
}
#[bench]
pub fn clone_slim_100_and_into_iter(b: &mut Bencher) {
let src = slim_map(100);
b.iter(|| src.clone().into_iter().count())
}
#[bench]
pub fn clone_slim_100_and_pop_all(b: &mut Bencher) {
let src = slim_map(100);
b.iter(|| {
let mut map = src.clone();
while map.pop_first().is_some() {}
map
});
}
#[bench]
pub fn clone_slim_100_and_remove_all(b: &mut Bencher) {
let src = slim_map(100);
b.iter(|| {
let mut map = src.clone();
while let Some(elt) = map.iter().map(|(&i, _)| i).next() {
let v = map.remove(&elt);
debug_assert!(v.is_some());
}
map
});
}
#[bench]
pub fn clone_slim_100_and_remove_half(b: &mut Bencher) {
let src = slim_map(100);
b.iter(|| {
let mut map = src.clone();
for i in (0..100).step_by(2) {
let v = map.remove(&i);
debug_assert!(v.is_some());
}
assert_eq!(map.len(), 100 / 2);
map
})
}
#[bench]
pub fn clone_slim_10k(b: &mut Bencher) {
let src = slim_map(10_000);
b.iter(|| src.clone())
}
#[bench]
pub fn clone_slim_10k_and_clear(b: &mut Bencher) {
let src = slim_map(10_000);
b.iter(|| src.clone().clear())
}
#[bench]
pub fn clone_slim_10k_and_drain_all(b: &mut Bencher) {
let src = slim_map(10_000);
b.iter(|| src.clone().drain_filter(|_, _| true).count())
}
#[bench]
pub fn clone_slim_10k_and_drain_half(b: &mut Bencher) {
let src = slim_map(10_000);
b.iter(|| {
let mut map = src.clone();
assert_eq!(map.drain_filter(|i, _| i % 2 == 0).count(), 10_000 / 2);
assert_eq!(map.len(), 10_000 / 2);
})
}
#[bench]
pub fn clone_slim_10k_and_into_iter(b: &mut Bencher) {
let src = slim_map(10_000);
b.iter(|| src.clone().into_iter().count())
}
#[bench]
pub fn clone_slim_10k_and_pop_all(b: &mut Bencher) {
let src = slim_map(10_000);
b.iter(|| {
let mut map = src.clone();
while map.pop_first().is_some() {}
map
});
}
#[bench]
pub fn clone_slim_10k_and_remove_all(b: &mut Bencher) {
let src = slim_map(10_000);
b.iter(|| {
let mut map = src.clone();
while let Some(elt) = map.iter().map(|(&i, _)| i).next() {
let v = map.remove(&elt);
debug_assert!(v.is_some());
}
map
});
}
#[bench]
pub fn clone_slim_10k_and_remove_half(b: &mut Bencher) {
let src = slim_map(10_000);
b.iter(|| {
let mut map = src.clone();
for i in (0..10_000).step_by(2) {
let v = map.remove(&i);
debug_assert!(v.is_some());
}
assert_eq!(map.len(), 10_000 / 2);
map
})
}
#[bench]
pub fn clone_fat_val_100(b: &mut Bencher) {
let src = fat_val_map(100);
b.iter(|| src.clone())
}
#[bench]
pub fn clone_fat_val_100_and_clear(b: &mut Bencher) {
let src = fat_val_map(100);
b.iter(|| src.clone().clear())
}
#[bench]
pub fn clone_fat_val_100_and_drain_all(b: &mut Bencher) {
let src = fat_val_map(100);
b.iter(|| src.clone().drain_filter(|_, _| true).count())
}
#[bench]
pub fn clone_fat_val_100_and_drain_half(b: &mut Bencher) {
let src = fat_val_map(100);
b.iter(|| {
let mut map = src.clone();
assert_eq!(map.drain_filter(|i, _| i % 2 == 0).count(), 100 / 2);
assert_eq!(map.len(), 100 / 2);
})
}
#[bench]
pub fn clone_fat_val_100_and_into_iter(b: &mut Bencher) {
let src = fat_val_map(100);
b.iter(|| src.clone().into_iter().count())
}
#[bench]
pub fn clone_fat_val_100_and_pop_all(b: &mut Bencher) {
let src = fat_val_map(100);
b.iter(|| {
let mut map = src.clone();
while map.pop_first().is_some() {}
map
});
}
#[bench]
pub fn clone_fat_val_100_and_remove_all(b: &mut Bencher) {
let src = fat_val_map(100);
b.iter(|| {
let mut map = src.clone();
while let Some(elt) = map.iter().map(|(&i, _)| i).next() {
let v = map.remove(&elt);
debug_assert!(v.is_some());
}
map
});
}
#[bench]
pub fn clone_fat_val_100_and_remove_half(b: &mut Bencher) {
let src = fat_val_map(100);
b.iter(|| {
let mut map = src.clone();
for i in (0..100).step_by(2) {
let v = map.remove(&i);
debug_assert!(v.is_some());
}
assert_eq!(map.len(), 100 / 2);
map
})
}
#[bench]
pub fn clone_fat_100(b: &mut Bencher) {
let src = fat_map(100);
b.iter(|| src.clone())
}
#[bench]
pub fn clone_fat_100_and_clear(b: &mut Bencher) {
let src = fat_map(100);
b.iter(|| src.clone().clear())
}
#[bench]
pub fn clone_fat_100_and_drain_all(b: &mut Bencher) {
let src = fat_map(100);
b.iter(|| src.clone().drain_filter(|_, _| true).count())
}
#[bench]
pub fn clone_fat_100_and_drain_half(b: &mut Bencher) {
let src = fat_map(100);
b.iter(|| {
let mut map = src.clone();
assert_eq!(map.drain_filter(|i, _| i[0] % 2 == 0).count(), 100 / 2);
assert_eq!(map.len(), 100 / 2);
})
}
#[bench]
pub fn clone_fat_100_and_into_iter(b: &mut Bencher) {
let src = fat_map(100);
b.iter(|| src.clone().into_iter().count())
}
#[bench]
pub fn clone_fat_100_and_pop_all(b: &mut Bencher) {
let src = fat_map(100);
b.iter(|| {
let mut map = src.clone();
while map.pop_first().is_some() {}
map
});
}
#[bench]
pub fn clone_fat_100_and_remove_all(b: &mut Bencher) {
let src = fat_map(100);
b.iter(|| {
let mut map = src.clone();
while let Some(elt) = map.iter().map(|(&i, _)| i).next() {
let v = map.remove(&elt);
debug_assert!(v.is_some());
}
map
});
}
#[bench]
pub fn clone_fat_100_and_remove_half(b: &mut Bencher) {
let src = fat_map(100);
b.iter(|| {
let mut map = src.clone();
for i in (0..100).step_by(2) {
let v = map.remove(&[i; FAT]);
debug_assert!(v.is_some());
}
assert_eq!(map.len(), 100 / 2);
map
})
}


@ -0,0 +1,224 @@
use std::collections::BTreeSet;
use rand::{thread_rng, Rng};
use test::Bencher;
fn random(n: usize) -> BTreeSet<usize> {
let mut rng = thread_rng();
let mut set = BTreeSet::new();
while set.len() < n {
set.insert(rng.gen());
}
assert_eq!(set.len(), n);
set
}
fn neg(n: usize) -> BTreeSet<i32> {
let set: BTreeSet<i32> = (-(n as i32)..=-1).collect();
assert_eq!(set.len(), n);
set
}
fn pos(n: usize) -> BTreeSet<i32> {
let set: BTreeSet<i32> = (1..=(n as i32)).collect();
assert_eq!(set.len(), n);
set
}
fn stagger(n1: usize, factor: usize) -> [BTreeSet<u32>; 2] {
let n2 = n1 * factor;
let mut sets = [BTreeSet::new(), BTreeSet::new()];
for i in 0..(n1 + n2) {
let b = i % (factor + 1) != 0;
sets[b as usize].insert(i as u32);
}
assert_eq!(sets[0].len(), n1);
assert_eq!(sets[1].len(), n2);
sets
}
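As a worked example of `stagger` (not in the source): with `n1 = 2` and `factor = 1`, indices `0..4` alternate between the two sets, so the benchmark inputs interleave perfectly.

```rust
#[test]
fn stagger_small() {
    let [evens, odds] = stagger(2, 1);
    // With factor = 1, `i % 2 != 0` routes odd indices to the second set.
    assert_eq!(evens.into_iter().collect::<Vec<_>>(), vec![0, 2]);
    assert_eq!(odds.into_iter().collect::<Vec<_>>(), vec![1, 3]);
}
```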
macro_rules! set_bench {
($name: ident, $set_func: ident, $result_func: ident, $sets: expr) => {
#[bench]
pub fn $name(b: &mut Bencher) {
// setup
let sets = $sets;
// measure
b.iter(|| sets[0].$set_func(&sets[1]).$result_func())
}
};
}
fn slim_set(n: usize) -> BTreeSet<usize> {
(0..n).collect::<BTreeSet<_>>()
}
#[bench]
pub fn clone_100(b: &mut Bencher) {
let src = slim_set(100);
b.iter(|| src.clone())
}
#[bench]
pub fn clone_100_and_clear(b: &mut Bencher) {
let src = slim_set(100);
b.iter(|| src.clone().clear())
}
#[bench]
pub fn clone_100_and_drain_all(b: &mut Bencher) {
let src = slim_set(100);
b.iter(|| src.clone().drain_filter(|_| true).count())
}
#[bench]
pub fn clone_100_and_drain_half(b: &mut Bencher) {
let src = slim_set(100);
b.iter(|| {
let mut set = src.clone();
assert_eq!(set.drain_filter(|i| i % 2 == 0).count(), 100 / 2);
assert_eq!(set.len(), 100 / 2);
})
}
#[bench]
pub fn clone_100_and_into_iter(b: &mut Bencher) {
let src = slim_set(100);
b.iter(|| src.clone().into_iter().count())
}
#[bench]
pub fn clone_100_and_pop_all(b: &mut Bencher) {
let src = slim_set(100);
b.iter(|| {
let mut set = src.clone();
while set.pop_first().is_some() {}
set
});
}
#[bench]
pub fn clone_100_and_remove_all(b: &mut Bencher) {
let src = slim_set(100);
b.iter(|| {
let mut set = src.clone();
while let Some(elt) = set.iter().copied().next() {
let ok = set.remove(&elt);
debug_assert!(ok);
}
set
});
}
#[bench]
pub fn clone_100_and_remove_half(b: &mut Bencher) {
let src = slim_set(100);
b.iter(|| {
let mut set = src.clone();
for i in (0..100).step_by(2) {
let ok = set.remove(&i);
debug_assert!(ok);
}
assert_eq!(set.len(), 100 / 2);
set
})
}
#[bench]
pub fn clone_10k(b: &mut Bencher) {
let src = slim_set(10_000);
b.iter(|| src.clone())
}
#[bench]
pub fn clone_10k_and_clear(b: &mut Bencher) {
let src = slim_set(10_000);
b.iter(|| src.clone().clear())
}
#[bench]
pub fn clone_10k_and_drain_all(b: &mut Bencher) {
let src = slim_set(10_000);
b.iter(|| src.clone().drain_filter(|_| true).count())
}
#[bench]
pub fn clone_10k_and_drain_half(b: &mut Bencher) {
let src = slim_set(10_000);
b.iter(|| {
let mut set = src.clone();
assert_eq!(set.drain_filter(|i| i % 2 == 0).count(), 10_000 / 2);
assert_eq!(set.len(), 10_000 / 2);
})
}
#[bench]
pub fn clone_10k_and_into_iter(b: &mut Bencher) {
let src = slim_set(10_000);
b.iter(|| src.clone().into_iter().count())
}
#[bench]
pub fn clone_10k_and_pop_all(b: &mut Bencher) {
let src = slim_set(10_000);
b.iter(|| {
let mut set = src.clone();
while set.pop_first().is_some() {}
set
});
}
#[bench]
pub fn clone_10k_and_remove_all(b: &mut Bencher) {
let src = slim_set(10_000);
b.iter(|| {
let mut set = src.clone();
while let Some(elt) = set.iter().copied().next() {
let ok = set.remove(&elt);
debug_assert!(ok);
}
set
});
}
#[bench]
pub fn clone_10k_and_remove_half(b: &mut Bencher) {
let src = slim_set(10_000);
b.iter(|| {
let mut set = src.clone();
for i in (0..10_000).step_by(2) {
let ok = set.remove(&i);
debug_assert!(ok);
}
assert_eq!(set.len(), 10_000 / 2);
set
})
}
set_bench! {intersection_100_neg_vs_100_pos, intersection, count, [neg(100), pos(100)]}
set_bench! {intersection_100_neg_vs_10k_pos, intersection, count, [neg(100), pos(10_000)]}
set_bench! {intersection_100_pos_vs_100_neg, intersection, count, [pos(100), neg(100)]}
set_bench! {intersection_100_pos_vs_10k_neg, intersection, count, [pos(100), neg(10_000)]}
set_bench! {intersection_10k_neg_vs_100_pos, intersection, count, [neg(10_000), pos(100)]}
set_bench! {intersection_10k_neg_vs_10k_pos, intersection, count, [neg(10_000), pos(10_000)]}
set_bench! {intersection_10k_pos_vs_100_neg, intersection, count, [pos(10_000), neg(100)]}
set_bench! {intersection_10k_pos_vs_10k_neg, intersection, count, [pos(10_000), neg(10_000)]}
set_bench! {intersection_random_100_vs_100, intersection, count, [random(100), random(100)]}
set_bench! {intersection_random_100_vs_10k, intersection, count, [random(100), random(10_000)]}
set_bench! {intersection_random_10k_vs_100, intersection, count, [random(10_000), random(100)]}
set_bench! {intersection_random_10k_vs_10k, intersection, count, [random(10_000), random(10_000)]}
set_bench! {intersection_staggered_100_vs_100, intersection, count, stagger(100, 1)}
set_bench! {intersection_staggered_10k_vs_10k, intersection, count, stagger(10_000, 1)}
set_bench! {intersection_staggered_100_vs_10k, intersection, count, stagger(100, 100)}
set_bench! {difference_random_100_vs_100, difference, count, [random(100), random(100)]}
set_bench! {difference_random_100_vs_10k, difference, count, [random(100), random(10_000)]}
set_bench! {difference_random_10k_vs_100, difference, count, [random(10_000), random(100)]}
set_bench! {difference_random_10k_vs_10k, difference, count, [random(10_000), random(10_000)]}
set_bench! {difference_staggered_100_vs_100, difference, count, stagger(100, 1)}
set_bench! {difference_staggered_10k_vs_10k, difference, count, stagger(10_000, 1)}
set_bench! {difference_staggered_100_vs_10k, difference, count, stagger(100, 100)}
set_bench! {is_subset_100_vs_100, is_subset, clone, [pos(100), pos(100)]}
set_bench! {is_subset_100_vs_10k, is_subset, clone, [pos(100), pos(10_000)]}
set_bench! {is_subset_10k_vs_100, is_subset, clone, [pos(10_000), pos(100)]}
set_bench! {is_subset_10k_vs_10k, is_subset, clone, [pos(10_000), pos(10_000)]}
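As with the map benchmarks, each `set_bench!` line generates one `#[bench]` function; a sketch of what the first intersection invocation expands to:

```rust
// Approximate expansion of
// `set_bench! {intersection_100_neg_vs_100_pos, intersection, count, [neg(100), pos(100)]}`.
#[bench]
pub fn intersection_100_neg_vs_100_pos(b: &mut Bencher) {
    // setup: all-negative vs. all-positive sets of 100 elements each
    let sets = [neg(100), pos(100)];
    // measure: drive the intersection iterator to completion
    b.iter(|| sets[0].intersection(&sets[1]).count())
}
```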


@ -0,0 +1,433 @@
use std::iter::{repeat, FromIterator};
use test::Bencher;
#[bench]
fn bench_new(b: &mut Bencher) {
b.iter(|| {
let v: Vec<u32> = Vec::new();
assert_eq!(v.len(), 0);
assert_eq!(v.capacity(), 0);
})
}
fn do_bench_with_capacity(b: &mut Bencher, src_len: usize) {
b.bytes = src_len as u64;
b.iter(|| {
let v: Vec<u32> = Vec::with_capacity(src_len);
assert_eq!(v.len(), 0);
assert_eq!(v.capacity(), src_len);
})
}
#[bench]
fn bench_with_capacity_0000(b: &mut Bencher) {
do_bench_with_capacity(b, 0)
}
#[bench]
fn bench_with_capacity_0010(b: &mut Bencher) {
do_bench_with_capacity(b, 10)
}
#[bench]
fn bench_with_capacity_0100(b: &mut Bencher) {
do_bench_with_capacity(b, 100)
}
#[bench]
fn bench_with_capacity_1000(b: &mut Bencher) {
do_bench_with_capacity(b, 1000)
}
fn do_bench_from_fn(b: &mut Bencher, src_len: usize) {
b.bytes = src_len as u64;
b.iter(|| {
let dst = (0..src_len).collect::<Vec<_>>();
assert_eq!(dst.len(), src_len);
assert!(dst.iter().enumerate().all(|(i, x)| i == *x));
})
}
#[bench]
fn bench_from_fn_0000(b: &mut Bencher) {
do_bench_from_fn(b, 0)
}
#[bench]
fn bench_from_fn_0010(b: &mut Bencher) {
do_bench_from_fn(b, 10)
}
#[bench]
fn bench_from_fn_0100(b: &mut Bencher) {
do_bench_from_fn(b, 100)
}
#[bench]
fn bench_from_fn_1000(b: &mut Bencher) {
do_bench_from_fn(b, 1000)
}
fn do_bench_from_elem(b: &mut Bencher, src_len: usize) {
b.bytes = src_len as u64;
b.iter(|| {
let dst: Vec<usize> = repeat(5).take(src_len).collect();
assert_eq!(dst.len(), src_len);
assert!(dst.iter().all(|x| *x == 5));
})
}
#[bench]
fn bench_from_elem_0000(b: &mut Bencher) {
do_bench_from_elem(b, 0)
}
#[bench]
fn bench_from_elem_0010(b: &mut Bencher) {
do_bench_from_elem(b, 10)
}
#[bench]
fn bench_from_elem_0100(b: &mut Bencher) {
do_bench_from_elem(b, 100)
}
#[bench]
fn bench_from_elem_1000(b: &mut Bencher) {
do_bench_from_elem(b, 1000)
}
fn do_bench_from_slice(b: &mut Bencher, src_len: usize) {
let src: Vec<_> = FromIterator::from_iter(0..src_len);
b.bytes = src_len as u64;
b.iter(|| {
let dst = src.clone()[..].to_vec();
assert_eq!(dst.len(), src_len);
assert!(dst.iter().enumerate().all(|(i, x)| i == *x));
});
}
#[bench]
fn bench_from_slice_0000(b: &mut Bencher) {
do_bench_from_slice(b, 0)
}
#[bench]
fn bench_from_slice_0010(b: &mut Bencher) {
do_bench_from_slice(b, 10)
}
#[bench]
fn bench_from_slice_0100(b: &mut Bencher) {
do_bench_from_slice(b, 100)
}
#[bench]
fn bench_from_slice_1000(b: &mut Bencher) {
do_bench_from_slice(b, 1000)
}
fn do_bench_from_iter(b: &mut Bencher, src_len: usize) {
let src: Vec<_> = FromIterator::from_iter(0..src_len);
b.bytes = src_len as u64;
b.iter(|| {
let dst: Vec<_> = FromIterator::from_iter(src.clone());
assert_eq!(dst.len(), src_len);
assert!(dst.iter().enumerate().all(|(i, x)| i == *x));
});
}
#[bench]
fn bench_from_iter_0000(b: &mut Bencher) {
do_bench_from_iter(b, 0)
}
#[bench]
fn bench_from_iter_0010(b: &mut Bencher) {
do_bench_from_iter(b, 10)
}
#[bench]
fn bench_from_iter_0100(b: &mut Bencher) {
do_bench_from_iter(b, 100)
}
#[bench]
fn bench_from_iter_1000(b: &mut Bencher) {
do_bench_from_iter(b, 1000)
}
fn do_bench_extend(b: &mut Bencher, dst_len: usize, src_len: usize) {
let dst: Vec<_> = FromIterator::from_iter(0..dst_len);
let src: Vec<_> = FromIterator::from_iter(dst_len..dst_len + src_len);
b.bytes = src_len as u64;
b.iter(|| {
let mut dst = dst.clone();
dst.extend(src.clone());
assert_eq!(dst.len(), dst_len + src_len);
assert!(dst.iter().enumerate().all(|(i, x)| i == *x));
});
}
#[bench]
fn bench_extend_0000_0000(b: &mut Bencher) {
do_bench_extend(b, 0, 0)
}
#[bench]
fn bench_extend_0000_0010(b: &mut Bencher) {
do_bench_extend(b, 0, 10)
}
#[bench]
fn bench_extend_0000_0100(b: &mut Bencher) {
do_bench_extend(b, 0, 100)
}
#[bench]
fn bench_extend_0000_1000(b: &mut Bencher) {
do_bench_extend(b, 0, 1000)
}
#[bench]
fn bench_extend_0010_0010(b: &mut Bencher) {
do_bench_extend(b, 10, 10)
}
#[bench]
fn bench_extend_0100_0100(b: &mut Bencher) {
do_bench_extend(b, 100, 100)
}
#[bench]
fn bench_extend_1000_1000(b: &mut Bencher) {
do_bench_extend(b, 1000, 1000)
}
fn do_bench_extend_from_slice(b: &mut Bencher, dst_len: usize, src_len: usize) {
let dst: Vec<_> = FromIterator::from_iter(0..dst_len);
let src: Vec<_> = FromIterator::from_iter(dst_len..dst_len + src_len);
b.bytes = src_len as u64;
b.iter(|| {
let mut dst = dst.clone();
dst.extend_from_slice(&src);
assert_eq!(dst.len(), dst_len + src_len);
assert!(dst.iter().enumerate().all(|(i, x)| i == *x));
});
}
#[bench]
fn bench_extend_from_slice_0000_0000(b: &mut Bencher) {
do_bench_extend_from_slice(b, 0, 0)
}
#[bench]
fn bench_extend_from_slice_0000_0010(b: &mut Bencher) {
do_bench_extend_from_slice(b, 0, 10)
}
#[bench]
fn bench_extend_from_slice_0000_0100(b: &mut Bencher) {
do_bench_extend_from_slice(b, 0, 100)
}
#[bench]
fn bench_extend_from_slice_0000_1000(b: &mut Bencher) {
do_bench_extend_from_slice(b, 0, 1000)
}
#[bench]
fn bench_extend_from_slice_0010_0010(b: &mut Bencher) {
do_bench_extend_from_slice(b, 10, 10)
}
#[bench]
fn bench_extend_from_slice_0100_0100(b: &mut Bencher) {
do_bench_extend_from_slice(b, 100, 100)
}
#[bench]
fn bench_extend_from_slice_1000_1000(b: &mut Bencher) {
do_bench_extend_from_slice(b, 1000, 1000)
}
fn do_bench_clone(b: &mut Bencher, src_len: usize) {
let src: Vec<usize> = FromIterator::from_iter(0..src_len);
b.bytes = src_len as u64;
b.iter(|| {
let dst = src.clone();
assert_eq!(dst.len(), src_len);
assert!(dst.iter().enumerate().all(|(i, x)| i == *x));
});
}
#[bench]
fn bench_clone_0000(b: &mut Bencher) {
do_bench_clone(b, 0)
}
#[bench]
fn bench_clone_0010(b: &mut Bencher) {
do_bench_clone(b, 10)
}
#[bench]
fn bench_clone_0100(b: &mut Bencher) {
do_bench_clone(b, 100)
}
#[bench]
fn bench_clone_1000(b: &mut Bencher) {
do_bench_clone(b, 1000)
}
fn do_bench_clone_from(b: &mut Bencher, times: usize, dst_len: usize, src_len: usize) {
let dst: Vec<_> = FromIterator::from_iter(0..src_len);
let src: Vec<_> = FromIterator::from_iter(dst_len..dst_len + src_len);
b.bytes = (times * src_len) as u64;
b.iter(|| {
let mut dst = dst.clone();
for _ in 0..times {
dst.clone_from(&src);
assert_eq!(dst.len(), src_len);
assert!(dst.iter().enumerate().all(|(i, x)| dst_len + i == *x));
}
});
}
#[bench]
fn bench_clone_from_01_0000_0000(b: &mut Bencher) {
do_bench_clone_from(b, 1, 0, 0)
}
#[bench]
fn bench_clone_from_01_0000_0010(b: &mut Bencher) {
do_bench_clone_from(b, 1, 0, 10)
}
#[bench]
fn bench_clone_from_01_0000_0100(b: &mut Bencher) {
do_bench_clone_from(b, 1, 0, 100)
}
#[bench]
fn bench_clone_from_01_0000_1000(b: &mut Bencher) {
do_bench_clone_from(b, 1, 0, 1000)
}
#[bench]
fn bench_clone_from_01_0010_0010(b: &mut Bencher) {
do_bench_clone_from(b, 1, 10, 10)
}
#[bench]
fn bench_clone_from_01_0100_0100(b: &mut Bencher) {
do_bench_clone_from(b, 1, 100, 100)
}
#[bench]
fn bench_clone_from_01_1000_1000(b: &mut Bencher) {
do_bench_clone_from(b, 1, 1000, 1000)
}
#[bench]
fn bench_clone_from_01_0010_0100(b: &mut Bencher) {
do_bench_clone_from(b, 1, 10, 100)
}
#[bench]
fn bench_clone_from_01_0100_1000(b: &mut Bencher) {
do_bench_clone_from(b, 1, 100, 1000)
}
#[bench]
fn bench_clone_from_01_0010_0000(b: &mut Bencher) {
do_bench_clone_from(b, 1, 10, 0)
}
#[bench]
fn bench_clone_from_01_0100_0010(b: &mut Bencher) {
do_bench_clone_from(b, 1, 100, 10)
}
#[bench]
fn bench_clone_from_01_1000_0100(b: &mut Bencher) {
do_bench_clone_from(b, 1, 1000, 100)
}
#[bench]
fn bench_clone_from_10_0000_0000(b: &mut Bencher) {
do_bench_clone_from(b, 10, 0, 0)
}
#[bench]
fn bench_clone_from_10_0000_0010(b: &mut Bencher) {
do_bench_clone_from(b, 10, 0, 10)
}
#[bench]
fn bench_clone_from_10_0000_0100(b: &mut Bencher) {
do_bench_clone_from(b, 10, 0, 100)
}
#[bench]
fn bench_clone_from_10_0000_1000(b: &mut Bencher) {
do_bench_clone_from(b, 10, 0, 1000)
}
#[bench]
fn bench_clone_from_10_0010_0010(b: &mut Bencher) {
do_bench_clone_from(b, 10, 10, 10)
}
#[bench]
fn bench_clone_from_10_0100_0100(b: &mut Bencher) {
do_bench_clone_from(b, 10, 100, 100)
}
#[bench]
fn bench_clone_from_10_1000_1000(b: &mut Bencher) {
do_bench_clone_from(b, 10, 1000, 1000)
}
#[bench]
fn bench_clone_from_10_0010_0100(b: &mut Bencher) {
do_bench_clone_from(b, 10, 10, 100)
}
#[bench]
fn bench_clone_from_10_0100_1000(b: &mut Bencher) {
do_bench_clone_from(b, 10, 100, 1000)
}
#[bench]
fn bench_clone_from_10_0010_0000(b: &mut Bencher) {
do_bench_clone_from(b, 10, 10, 0)
}
#[bench]
fn bench_clone_from_10_0100_0010(b: &mut Bencher) {
do_bench_clone_from(b, 10, 100, 10)
}
#[bench]
fn bench_clone_from_10_1000_0100(b: &mut Bencher) {
do_bench_clone_from(b, 10, 1000, 100)
}

library/alloc/src/alloc.rs (new file)

@ -0,0 +1,323 @@
//! Memory allocation APIs
#![stable(feature = "alloc_module", since = "1.28.0")]
use core::intrinsics::{self, min_align_of_val, size_of_val};
use core::ptr::{NonNull, Unique};
#[stable(feature = "alloc_module", since = "1.28.0")]
#[doc(inline)]
pub use core::alloc::*;
#[cfg(test)]
mod tests;
extern "Rust" {
// These are the magic symbols to call the global allocator. rustc generates
// them from the `#[global_allocator]` attribute if there is one, or uses the
// default implementations in libstd (`__rdl_alloc` etc in `src/libstd/alloc.rs`)
// otherwise.
#[rustc_allocator]
#[rustc_allocator_nounwind]
fn __rust_alloc(size: usize, align: usize) -> *mut u8;
#[rustc_allocator_nounwind]
fn __rust_dealloc(ptr: *mut u8, size: usize, align: usize);
#[rustc_allocator_nounwind]
fn __rust_realloc(ptr: *mut u8, old_size: usize, align: usize, new_size: usize) -> *mut u8;
#[rustc_allocator_nounwind]
fn __rust_alloc_zeroed(size: usize, align: usize) -> *mut u8;
}
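For context on where these symbols come from: a user crate picks the allocator with the stable `#[global_allocator]` attribute, and rustc emits `__rust_alloc` and friends as forwarders to it. A minimal sketch (the `Counter` wrapper is made up for illustration; it delegates to `std::alloc::System`):

```rust
use std::alloc::{GlobalAlloc, Layout, System};
use std::sync::atomic::{AtomicUsize, Ordering};

// Illustrative allocator: delegates to the system allocator, counting bytes.
struct Counter(AtomicUsize);

unsafe impl GlobalAlloc for Counter {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        self.0.fetch_add(layout.size(), Ordering::Relaxed);
        System.alloc(layout)
    }
    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        System.dealloc(ptr, layout)
    }
}

// rustc generates `__rust_alloc` etc. to call into this static.
#[global_allocator]
static GLOBAL: Counter = Counter(AtomicUsize::new(0));

fn main() {
    let v = vec![0u8; 1024]; // allocated through GLOBAL
    println!("bytes requested: {}", GLOBAL.0.load(Ordering::Relaxed));
    drop(v);
}
```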
/// The global memory allocator.
///
/// This type implements the [`AllocRef`] trait by forwarding calls
/// to the allocator registered with the `#[global_allocator]` attribute
/// if there is one, or the `std` crate's default.
///
/// Note: while this type is unstable, the functionality it provides can be
/// accessed through the [free functions in `alloc`](index.html#functions).
#[unstable(feature = "allocator_api", issue = "32838")]
#[derive(Copy, Clone, Default, Debug)]
pub struct Global;
/// Allocate memory with the global allocator.
///
/// This function forwards calls to the [`GlobalAlloc::alloc`] method
/// of the allocator registered with the `#[global_allocator]` attribute
/// if there is one, or the `std` crate's default.
///
/// This function is expected to be deprecated in favor of the `alloc` method
/// of the [`Global`] type when it and the [`AllocRef`] trait become stable.
///
/// # Safety
///
/// See [`GlobalAlloc::alloc`].
///
/// # Examples
///
/// ```
/// use std::alloc::{alloc, dealloc, Layout};
///
/// unsafe {
/// let layout = Layout::new::<u16>();
/// let ptr = alloc(layout);
///
/// *(ptr as *mut u16) = 42;
/// assert_eq!(*(ptr as *mut u16), 42);
///
/// dealloc(ptr, layout);
/// }
/// ```
#[stable(feature = "global_alloc", since = "1.28.0")]
#[inline]
pub unsafe fn alloc(layout: Layout) -> *mut u8 {
unsafe { __rust_alloc(layout.size(), layout.align()) }
}
/// Deallocate memory with the global allocator.
///
/// This function forwards calls to the [`GlobalAlloc::dealloc`] method
/// of the allocator registered with the `#[global_allocator]` attribute
/// if there is one, or the `std` crate's default.
///
/// This function is expected to be deprecated in favor of the `dealloc` method
/// of the [`Global`] type when it and the [`AllocRef`] trait become stable.
///
/// # Safety
///
/// See [`GlobalAlloc::dealloc`].
#[stable(feature = "global_alloc", since = "1.28.0")]
#[inline]
pub unsafe fn dealloc(ptr: *mut u8, layout: Layout) {
unsafe { __rust_dealloc(ptr, layout.size(), layout.align()) }
}
/// Reallocate memory with the global allocator.
///
/// This function forwards calls to the [`GlobalAlloc::realloc`] method
/// of the allocator registered with the `#[global_allocator]` attribute
/// if there is one, or the `std` crate's default.
///
/// This function is expected to be deprecated in favor of the `realloc` method
/// of the [`Global`] type when it and the [`AllocRef`] trait become stable.
///
/// # Safety
///
/// See [`GlobalAlloc::realloc`].
#[stable(feature = "global_alloc", since = "1.28.0")]
#[inline]
pub unsafe fn realloc(ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
unsafe { __rust_realloc(ptr, layout.size(), layout.align(), new_size) }
}
/// Allocate zero-initialized memory with the global allocator.
///
/// This function forwards calls to the [`GlobalAlloc::alloc_zeroed`] method
/// of the allocator registered with the `#[global_allocator]` attribute
/// if there is one, or the `std` crate's default.
///
/// This function is expected to be deprecated in favor of the `alloc_zeroed` method
/// of the [`Global`] type when it and the [`AllocRef`] trait become stable.
///
/// # Safety
///
/// See [`GlobalAlloc::alloc_zeroed`].
///
/// # Examples
///
/// ```
/// use std::alloc::{alloc_zeroed, dealloc, Layout};
///
/// unsafe {
/// let layout = Layout::new::<u16>();
/// let ptr = alloc_zeroed(layout);
///
/// assert_eq!(*(ptr as *mut u16), 0);
///
/// dealloc(ptr, layout);
/// }
/// ```
#[stable(feature = "global_alloc", since = "1.28.0")]
#[inline]
pub unsafe fn alloc_zeroed(layout: Layout) -> *mut u8 {
unsafe { __rust_alloc_zeroed(layout.size(), layout.align()) }
}
impl Global {
#[inline]
fn alloc_impl(&mut self, layout: Layout, zeroed: bool) -> Result<NonNull<[u8]>, AllocErr> {
match layout.size() {
0 => Ok(NonNull::slice_from_raw_parts(layout.dangling(), 0)),
// SAFETY: `layout` is non-zero in size,
size => unsafe {
let raw_ptr = if zeroed { alloc_zeroed(layout) } else { alloc(layout) };
let ptr = NonNull::new(raw_ptr).ok_or(AllocErr)?;
Ok(NonNull::slice_from_raw_parts(ptr, size))
},
}
}
// Safety: Same as `AllocRef::grow`
#[inline]
unsafe fn grow_impl(
&mut self,
ptr: NonNull<u8>,
layout: Layout,
new_size: usize,
zeroed: bool,
) -> Result<NonNull<[u8]>, AllocErr> {
debug_assert!(
new_size >= layout.size(),
"`new_size` must be greater than or equal to `layout.size()`"
);
match layout.size() {
// SAFETY: the caller must ensure that the `new_size` does not overflow.
// `layout.align()` comes from a `Layout` and is thus guaranteed to be valid for a Layout.
0 => unsafe {
let new_layout = Layout::from_size_align_unchecked(new_size, layout.align());
self.alloc_impl(new_layout, zeroed)
},
// SAFETY: `new_size` is non-zero, because the caller guarantees `new_size >= layout.size()`
// and `old_size` is non-zero in this arm. Other conditions must be upheld by the caller
old_size => unsafe {
// `realloc` probably checks for `new_size >= size` or something similar.
intrinsics::assume(new_size >= layout.size());
let raw_ptr = realloc(ptr.as_ptr(), layout, new_size);
let ptr = NonNull::new(raw_ptr).ok_or(AllocErr)?;
if zeroed {
raw_ptr.add(old_size).write_bytes(0, new_size - old_size);
}
Ok(NonNull::slice_from_raw_parts(ptr, new_size))
},
}
}
}
#[unstable(feature = "allocator_api", issue = "32838")]
unsafe impl AllocRef for Global {
#[inline]
fn alloc(&mut self, layout: Layout) -> Result<NonNull<[u8]>, AllocErr> {
self.alloc_impl(layout, false)
}
#[inline]
fn alloc_zeroed(&mut self, layout: Layout) -> Result<NonNull<[u8]>, AllocErr> {
self.alloc_impl(layout, true)
}
#[inline]
unsafe fn dealloc(&mut self, ptr: NonNull<u8>, layout: Layout) {
if layout.size() != 0 {
// SAFETY: `layout` is non-zero in size,
// other conditions must be upheld by the caller
unsafe { dealloc(ptr.as_ptr(), layout) }
}
}
#[inline]
unsafe fn grow(
&mut self,
ptr: NonNull<u8>,
layout: Layout,
new_size: usize,
) -> Result<NonNull<[u8]>, AllocErr> {
// SAFETY: all conditions must be upheld by the caller
unsafe { self.grow_impl(ptr, layout, new_size, false) }
}
#[inline]
unsafe fn grow_zeroed(
&mut self,
ptr: NonNull<u8>,
layout: Layout,
new_size: usize,
) -> Result<NonNull<[u8]>, AllocErr> {
// SAFETY: all conditions must be upheld by the caller
unsafe { self.grow_impl(ptr, layout, new_size, true) }
}
#[inline]
unsafe fn shrink(
&mut self,
ptr: NonNull<u8>,
layout: Layout,
new_size: usize,
) -> Result<NonNull<[u8]>, AllocErr> {
debug_assert!(
new_size <= layout.size(),
"`new_size` must be smaller than or equal to `layout.size()`"
);
match new_size {
// SAFETY: conditions must be upheld by the caller
0 => unsafe {
self.dealloc(ptr, layout);
Ok(NonNull::slice_from_raw_parts(layout.dangling(), 0))
},
// SAFETY: `new_size` is non-zero. Other conditions must be upheld by the caller
new_size => unsafe {
// `realloc` probably checks for `new_size <= size` or something similar.
intrinsics::assume(new_size <= layout.size());
let raw_ptr = realloc(ptr.as_ptr(), layout, new_size);
let ptr = NonNull::new(raw_ptr).ok_or(AllocErr)?;
Ok(NonNull::slice_from_raw_parts(ptr, new_size))
},
}
}
}
/// The allocator for unique pointers.
// This function must not unwind. If it does, MIR codegen will fail.
#[cfg(not(test))]
#[lang = "exchange_malloc"]
#[inline]
unsafe fn exchange_malloc(size: usize, align: usize) -> *mut u8 {
let layout = unsafe { Layout::from_size_align_unchecked(size, align) };
match Global.alloc(layout) {
Ok(ptr) => ptr.as_non_null_ptr().as_ptr(),
Err(_) => handle_alloc_error(layout),
}
}
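For orientation, a hedged sketch of the user-facing code that ends up here: boxed allocations (including the unstable `box` syntax used in the tests further down) obtain their storage from `exchange_malloc` when this crate is compiled without `cfg(test)`.

```rust
fn main() {
    // Box::new asks the global allocator (via exchange_malloc) for storage
    // sized and aligned for [u64; 4], then moves the value into it.
    let b = Box::new([0u64; 4]);
    assert_eq!(b.len(), 4);
}
```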
#[cfg_attr(not(test), lang = "box_free")]
#[inline]
// This signature has to be the same as `Box`, otherwise an ICE will happen.
// When an additional parameter to `Box` is added (like `A: AllocRef`), this has to be added here as
// well.
// For example if `Box` is changed to `struct Box<T: ?Sized, A: AllocRef>(Unique<T>, A)`,
// this function has to be changed to `fn box_free<T: ?Sized, A: AllocRef>(Unique<T>, A)` as well.
pub(crate) unsafe fn box_free<T: ?Sized>(ptr: Unique<T>) {
unsafe {
let size = size_of_val(ptr.as_ref());
let align = min_align_of_val(ptr.as_ref());
let layout = Layout::from_size_align_unchecked(size, align);
Global.dealloc(ptr.cast().into(), layout)
}
}
/// Abort on memory allocation error or failure.
///
/// Callers of memory allocation APIs wishing to abort computation
/// in response to an allocation error are encouraged to call this function,
/// rather than directly invoking `panic!` or similar.
///
/// The default behavior of this function is to print a message to standard error
/// and abort the process.
/// It can be replaced with [`set_alloc_error_hook`] and [`take_alloc_error_hook`].
///
/// [`set_alloc_error_hook`]: ../../std/alloc/fn.set_alloc_error_hook.html
/// [`take_alloc_error_hook`]: ../../std/alloc/fn.take_alloc_error_hook.html
#[stable(feature = "global_alloc", since = "1.28.0")]
#[rustc_allocator_nounwind]
pub fn handle_alloc_error(layout: Layout) -> ! {
extern "Rust" {
#[lang = "oom"]
fn oom_impl(layout: Layout) -> !;
}
unsafe { oom_impl(layout) }
}
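A sketch of the intended call pattern for the free functions above: check the returned pointer and divert to `handle_alloc_error` instead of panicking.

```rust
use std::alloc::{alloc, dealloc, handle_alloc_error, Layout};

fn main() {
    let layout = Layout::new::<u128>();
    unsafe {
        let ptr = alloc(layout);
        if ptr.is_null() {
            // Aborts via the `oom` lang item (or a hook installed with
            // `set_alloc_error_hook`) rather than unwinding.
            handle_alloc_error(layout);
        }
        dealloc(ptr, layout);
    }
}
```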


@ -0,0 +1,30 @@
use super::*;
extern crate test;
use crate::boxed::Box;
use test::Bencher;
#[test]
fn allocate_zeroed() {
unsafe {
let layout = Layout::from_size_align(1024, 1).unwrap();
let ptr =
Global.alloc_zeroed(layout.clone()).unwrap_or_else(|_| handle_alloc_error(layout));
let mut i = ptr.as_non_null_ptr().as_ptr();
let end = i.add(layout.size());
while i < end {
assert_eq!(*i, 0);
i = i.offset(1);
}
Global.dealloc(ptr.as_non_null_ptr(), layout);
}
}
#[bench]
#[cfg_attr(miri, ignore)] // isolated Miri does not support benchmarks
fn alloc_owned_small(b: &mut Bencher) {
b.iter(|| {
let _: Box<_> = box 10;
})
}

library/alloc/src/boxed.rs (new file, 1168 lines): diff suppressed because it is too large.


@ -1,9 +1,9 @@
//! A priority queue implemented with a binary heap.
//!
//! Insertion and popping the largest element have `O(log(n))` time complexity.
//! Checking the largest element is `O(1)`. Converting a vector to a binary heap
//! can be done in-place, and has `O(n)` complexity. A binary heap can also be
//! converted to a sorted vector in-place, allowing it to be used for an `O(n * log(n))`
//! Insertion and popping the largest element have *O*(log(*n*)) time complexity.
//! Checking the largest element is *O*(1). Converting a vector to a binary heap
//! can be done in-place, and has *O*(*n*) complexity. A binary heap can also be
//! converted to a sorted vector in-place, allowing it to be used for an *O*(*n* \* log(*n*))
//! in-place heapsort.
//!
//! # Examples
@ -12,9 +12,9 @@
//! to solve the [shortest path problem][sssp] on a [directed graph][dir_graph].
//! It shows how to use [`BinaryHeap`] with custom types.
//!
//! [dijkstra]: http://en.wikipedia.org/wiki/Dijkstra%27s_algorithm
//! [sssp]: http://en.wikipedia.org/wiki/Shortest_path_problem
//! [dir_graph]: http://en.wikipedia.org/wiki/Directed_graph
//! [dijkstra]: https://en.wikipedia.org/wiki/Dijkstra%27s_algorithm
//! [sssp]: https://en.wikipedia.org/wiki/Shortest_path_problem
//! [dir_graph]: https://en.wikipedia.org/wiki/Directed_graph
//! [`BinaryHeap`]: struct.BinaryHeap.html
//!
//! ```
@ -235,7 +235,7 @@ use super::SpecExtend;
///
/// | [push] | [pop] | [peek]/[peek\_mut] |
/// |--------|-----------|--------------------|
/// | O(1)~ | O(log(n)) | O(1) |
/// | O(1)~ | *O*(log(*n*)) | *O*(1) |
///
/// The value for `push` is an expected cost; the method documentation gives a
/// more detailed analysis.
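The costs in the table are observable with the stable `BinaryHeap` API; a small sketch:

```rust
use std::collections::BinaryHeap;

fn main() {
    // From<Vec<T>> heapifies in place: O(n).
    let mut heap = BinaryHeap::from(vec![3, 1, 4, 1, 5]);
    heap.push(9); // expected O(1); O(log n) amortized for ascending input
    assert_eq!(heap.peek(), Some(&9)); // O(1)
    assert_eq!(heap.pop(), Some(9)); // O(log n)
    // into_sorted_vec is the O(n * log(n)) in-place heapsort.
    assert_eq!(heap.into_sorted_vec(), vec![1, 1, 3, 4, 5]);
}
```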
@ -398,7 +398,7 @@ impl<T: Ord> BinaryHeap<T> {
///
/// # Time complexity
///
/// Cost is `O(1)` in the worst case.
/// Cost is *O*(1) in the worst case.
#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
pub fn peek_mut(&mut self) -> Option<PeekMut<'_, T>> {
if self.is_empty() { None } else { Some(PeekMut { heap: self, sift: true }) }
@ -422,7 +422,7 @@ impl<T: Ord> BinaryHeap<T> {
///
/// # Time complexity
///
/// The worst case cost of `pop` on a heap containing *n* elements is `O(log(n))`.
/// The worst case cost of `pop` on a heap containing *n* elements is *O*(log(*n*)).
#[stable(feature = "rust1", since = "1.0.0")]
pub fn pop(&mut self) -> Option<T> {
self.data.pop().map(|mut item| {
@ -455,15 +455,15 @@ impl<T: Ord> BinaryHeap<T> {
///
/// The expected cost of `push`, averaged over every possible ordering of
/// the elements being pushed, and over a sufficiently large number of
/// pushes, is `O(1)`. This is the most meaningful cost metric when pushing
/// pushes, is *O*(1). This is the most meaningful cost metric when pushing
/// elements that are *not* already in any sorted pattern.
///
/// The time complexity degrades if elements are pushed in predominantly
/// ascending order. In the worst case, elements are pushed in ascending
/// sorted order and the amortized cost per push is `O(log(n))` against a heap
/// sorted order and the amortized cost per push is *O*(log(*n*)) against a heap
/// containing *n* elements.
///
/// The worst case cost of a *single* call to `push` is `O(n)`. The worst case
/// The worst case cost of a *single* call to `push` is *O*(*n*). The worst case
/// occurs when capacity is exhausted and needs a resize. The resize cost
/// has been amortized in the previous figures.
#[stable(feature = "rust1", since = "1.0.0")]
@ -643,7 +643,7 @@ impl<T: Ord> BinaryHeap<T> {
/// The remaining elements will be removed on drop in heap order.
///
/// Note:
/// * `.drain_sorted()` is `O(n * log(n))`; much slower than `.drain()`.
/// * `.drain_sorted()` is *O*(*n* \* log(*n*)); much slower than `.drain()`.
/// You should use the latter for most cases.
///
/// # Examples
@ -756,7 +756,7 @@ impl<T> BinaryHeap<T> {
///
/// # Time complexity
///
/// Cost is `O(1)` in the worst case.
/// Cost is *O*(1) in the worst case.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn peek(&self) -> Option<&T> {
self.data.get(0)
@ -1312,7 +1312,7 @@ unsafe impl<T: Ord> TrustedLen for DrainSorted<'_, T> {}
impl<T: Ord> From<Vec<T>> for BinaryHeap<T> {
/// Converts a `Vec<T>` into a `BinaryHeap<T>`.
///
/// This conversion happens in-place, and has `O(n)` time complexity.
/// This conversion happens in-place, and has *O*(*n*) time complexity.
fn from(vec: Vec<T>) -> BinaryHeap<T> {
let mut heap = BinaryHeap { data: vec };
heap.rebuild();

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@ -0,0 +1,54 @@
pub mod map;
mod navigate;
mod node;
mod search;
pub mod set;
#[doc(hidden)]
trait Recover<Q: ?Sized> {
type Key;
fn get(&self, key: &Q) -> Option<&Self::Key>;
fn take(&mut self, key: &Q) -> Option<Self::Key>;
fn replace(&mut self, key: Self::Key) -> Option<Self::Key>;
}
#[inline(always)]
pub unsafe fn unwrap_unchecked<T>(val: Option<T>) -> T {
val.unwrap_or_else(|| {
if cfg!(debug_assertions) {
panic!("'unchecked' unwrap on None in BTreeMap");
} else {
unsafe {
core::intrinsics::unreachable();
}
}
})
}
#[cfg(test)]
/// XorShiftRng
struct DeterministicRng {
x: u32,
y: u32,
z: u32,
w: u32,
}
#[cfg(test)]
impl DeterministicRng {
fn new() -> Self {
DeterministicRng { x: 0x193a6754, y: 0xa8a7d469, z: 0x97830e05, w: 0x113ba7bb }
}
fn next(&mut self) -> u32 {
let x = self.x;
let t = x ^ (x << 11);
self.x = self.y;
self.y = self.z;
self.z = self.w;
let w_ = self.w;
self.w = w_ ^ (w_ >> 19) ^ (t ^ (t >> 8));
self.w
}
}
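Since the point of this helper is reproducibility, the defining property can be stated as a test (hypothetical, not in the source):

```rust
#[test]
fn deterministic_rng_repeats_its_sequence() {
    let mut a = DeterministicRng::new();
    let mut b = DeterministicRng::new();
    // Identical fixed seeds yield identical xorshift streams.
    for _ in 0..1_000 {
        assert_eq!(a.next(), b.next());
    }
}
```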


@ -0,0 +1,335 @@
use core::intrinsics;
use core::mem;
use core::ptr;
use super::node::{marker, ForceResult::*, Handle, NodeRef};
use super::unwrap_unchecked;
impl<BorrowType, K, V> Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge> {
/// Given a leaf edge handle, returns [`Result::Ok`] with a handle to the neighboring KV
/// on the right side, which is either in the same leaf node or in an ancestor node.
/// If the leaf edge is the last one in the tree, returns [`Result::Err`] with the root node.
pub fn next_kv(
self,
) -> Result<
Handle<NodeRef<BorrowType, K, V, marker::LeafOrInternal>, marker::KV>,
NodeRef<BorrowType, K, V, marker::LeafOrInternal>,
> {
let mut edge = self.forget_node_type();
loop {
edge = match edge.right_kv() {
Ok(internal_kv) => return Ok(internal_kv),
Err(last_edge) => match last_edge.into_node().ascend() {
Ok(parent_edge) => parent_edge.forget_node_type(),
Err(root) => return Err(root),
},
}
}
}
/// Given a leaf edge handle, returns [`Result::Ok`] with a handle to the neighboring KV
/// on the left side, which is either in the same leaf node or in an ancestor node.
/// If the leaf edge is the first one in the tree, returns [`Result::Err`] with the root node.
pub fn next_back_kv(
self,
) -> Result<
Handle<NodeRef<BorrowType, K, V, marker::LeafOrInternal>, marker::KV>,
NodeRef<BorrowType, K, V, marker::LeafOrInternal>,
> {
let mut edge = self.forget_node_type();
loop {
edge = match edge.left_kv() {
Ok(internal_kv) => return Ok(internal_kv),
Err(last_edge) => match last_edge.into_node().ascend() {
Ok(parent_edge) => parent_edge.forget_node_type(),
Err(root) => return Err(root),
},
}
}
}
}
impl<BorrowType, K, V> Handle<NodeRef<BorrowType, K, V, marker::Internal>, marker::Edge> {
/// Given an internal edge handle, returns [`Result::Ok`] with a handle to the neighboring KV
/// on the right side, which is either in the same internal node or in an ancestor node.
/// If the internal edge is the last one in the tree, returns [`Result::Err`] with the root node.
pub fn next_kv(
self,
) -> Result<
Handle<NodeRef<BorrowType, K, V, marker::Internal>, marker::KV>,
NodeRef<BorrowType, K, V, marker::Internal>,
> {
let mut edge = self;
loop {
edge = match edge.right_kv() {
Ok(internal_kv) => return Ok(internal_kv),
Err(last_edge) => match last_edge.into_node().ascend() {
Ok(parent_edge) => parent_edge,
Err(root) => return Err(root),
},
}
}
}
}
macro_rules! def_next_kv_unchecked_dealloc {
{ unsafe fn $name:ident : $adjacent_kv:ident } => {
/// Given a leaf edge handle into an owned tree, returns a handle to the next KV,
/// while deallocating any node left behind.
/// Unsafe for two reasons:
/// - The caller must ensure that the leaf edge is not the last one in the tree.
/// - The node pointed at by the given handle, and its ancestors, may be deallocated,
/// while the reference to those nodes in the surviving ancestors is left dangling;
/// thus using the returned handle to navigate further is dangerous.
unsafe fn $name <K, V>(
leaf_edge: Handle<NodeRef<marker::Owned, K, V, marker::Leaf>, marker::Edge>,
) -> Handle<NodeRef<marker::Owned, K, V, marker::LeafOrInternal>, marker::KV> {
let mut edge = leaf_edge.forget_node_type();
loop {
edge = match edge.$adjacent_kv() {
Ok(internal_kv) => return internal_kv,
Err(last_edge) => {
unsafe {
let parent_edge = last_edge.into_node().deallocate_and_ascend();
unwrap_unchecked(parent_edge).forget_node_type()
}
}
}
}
}
};
}
def_next_kv_unchecked_dealloc! {unsafe fn next_kv_unchecked_dealloc: right_kv}
def_next_kv_unchecked_dealloc! {unsafe fn next_back_kv_unchecked_dealloc: left_kv}
/// This replaces the value behind the `v` unique reference by calling the
/// relevant function, and returns a result obtained along the way.
///
/// If a panic occurs in the `change` closure, the entire process will be aborted.
#[inline]
fn replace<T, R>(v: &mut T, change: impl FnOnce(T) -> (T, R)) -> R {
struct PanicGuard;
impl Drop for PanicGuard {
fn drop(&mut self) {
intrinsics::abort()
}
}
let guard = PanicGuard;
let value = unsafe { ptr::read(v) };
let (new_value, ret) = change(value);
unsafe {
ptr::write(v, new_value);
}
mem::forget(guard);
ret
}
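Outside of tree navigation, the same take-transform-write-back pattern looks like this (illustrative only; `replace` is private to this module):

```rust
fn demo() {
    let mut s = String::from("tree");
    // Move `s` out, transform it by value, write the result back, and
    // keep the closure's auxiliary return value. A panic inside the
    // closure aborts instead of leaving `s` logically uninitialized.
    let old_len = replace(&mut s, |owned| {
        let len = owned.len();
        (owned + "-map", len)
    });
    assert_eq!(old_len, 4);
    assert_eq!(s, "tree-map");
}
```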
impl<'a, K, V> Handle<NodeRef<marker::Immut<'a>, K, V, marker::Leaf>, marker::Edge> {
/// Moves the leaf edge handle to the next leaf edge and returns references to the
/// key and value in between.
/// Unsafe because the caller must ensure that the leaf edge is not the last one in the tree.
pub unsafe fn next_unchecked(&mut self) -> (&'a K, &'a V) {
replace(self, |leaf_edge| {
let kv = leaf_edge.next_kv();
let kv = unsafe { unwrap_unchecked(kv.ok()) };
(kv.next_leaf_edge(), kv.into_kv())
})
}
/// Moves the leaf edge handle to the previous leaf edge and returns references to the
/// key and value in between.
/// Unsafe because the caller must ensure that the leaf edge is not the first one in the tree.
pub unsafe fn next_back_unchecked(&mut self) -> (&'a K, &'a V) {
replace(self, |leaf_edge| {
let kv = leaf_edge.next_back_kv();
let kv = unsafe { unwrap_unchecked(kv.ok()) };
(kv.next_back_leaf_edge(), kv.into_kv())
})
}
}
impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::Edge> {
/// Moves the leaf edge handle to the next leaf edge and returns references to the
/// key and value in between.
/// Unsafe for two reasons:
/// - The caller must ensure that the leaf edge is not the last one in the tree.
/// - Using the updated handle may well invalidate the returned references.
pub unsafe fn next_unchecked(&mut self) -> (&'a mut K, &'a mut V) {
let kv = replace(self, |leaf_edge| {
let kv = leaf_edge.next_kv();
let kv = unsafe { unwrap_unchecked(kv.ok()) };
(unsafe { ptr::read(&kv) }.next_leaf_edge(), kv)
});
// Doing the descend (and perhaps another move) invalidates the references
// returned by `into_kv_mut`, so we have to do this last.
kv.into_kv_mut()
}
/// Moves the leaf edge handle to the previous leaf and returns references to the
/// key and value in between.
/// Unsafe for two reasons:
/// - The caller must ensure that the leaf edge is not the first one in the tree.
/// - Using the updated handle may well invalidate the returned references.
pub unsafe fn next_back_unchecked(&mut self) -> (&'a mut K, &'a mut V) {
let kv = replace(self, |leaf_edge| {
let kv = leaf_edge.next_back_kv();
let kv = unsafe { unwrap_unchecked(kv.ok()) };
(unsafe { ptr::read(&kv) }.next_back_leaf_edge(), kv)
});
// Doing the descend (and perhaps another move) invalidates the references
// returned by `into_kv_mut`, so we have to do this last.
kv.into_kv_mut()
}
}
impl<K, V> Handle<NodeRef<marker::Owned, K, V, marker::Leaf>, marker::Edge> {
/// Moves the leaf edge handle to the next leaf edge and returns the key and value
/// in between, while deallocating any node left behind.
/// Unsafe for two reasons:
/// - The caller must ensure that the leaf edge is not the last one in the tree
/// and is not a handle previously resulting from counterpart `next_back_unchecked`.
/// - Further use of the updated leaf edge handle is very dangerous. In particular,
/// if the leaf edge is the last edge of a node, that node and possibly ancestors
/// will be deallocated, while the reference to those nodes in the surviving ancestor
/// is left dangling.
/// The only safe way to proceed with the updated handle is to compare it, drop it,
/// call this method again subject to both preconditions listed in the first point,
/// or call counterpart `next_back_unchecked` subject to its preconditions.
pub unsafe fn next_unchecked(&mut self) -> (K, V) {
replace(self, |leaf_edge| {
let kv = unsafe { next_kv_unchecked_dealloc(leaf_edge) };
let k = unsafe { ptr::read(kv.reborrow().into_kv().0) };
let v = unsafe { ptr::read(kv.reborrow().into_kv().1) };
(kv.next_leaf_edge(), (k, v))
})
}
/// Moves the leaf edge handle to the previous leaf edge and returns the key
/// and value in between, while deallocating any node left behind.
/// Unsafe for two reasons:
/// - The caller must ensure that the leaf edge is not the first one in the tree
/// and is not a handle previously resulting from counterpart `next_unchecked`.
/// - Further use of the updated leaf edge handle is very dangerous. In particular,
/// if the leaf edge is the first edge of a node, that node and possibly ancestors
/// will be deallocated, while the reference to those nodes in the surviving ancestor
/// is left dangling.
/// The only safe way to proceed with the updated handle is to compare it, drop it,
/// call this method again subject to both preconditions listed in the first point,
/// or call counterpart `next_unchecked` subject to its preconditions.
pub unsafe fn next_back_unchecked(&mut self) -> (K, V) {
replace(self, |leaf_edge| {
let kv = unsafe { next_back_kv_unchecked_dealloc(leaf_edge) };
let k = unsafe { ptr::read(kv.reborrow().into_kv().0) };
let v = unsafe { ptr::read(kv.reborrow().into_kv().1) };
(kv.next_back_leaf_edge(), (k, v))
})
}
}
impl<BorrowType, K, V> NodeRef<BorrowType, K, V, marker::LeafOrInternal> {
/// Returns the leftmost leaf edge in or underneath a node - in other words, the edge
/// you need first when navigating forward (or last when navigating backward).
#[inline]
pub fn first_leaf_edge(self) -> Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge> {
let mut node = self;
loop {
match node.force() {
Leaf(leaf) => return leaf.first_edge(),
Internal(internal) => node = internal.first_edge().descend(),
}
}
}
/// Returns the rightmost leaf edge in or underneath a node - in other words, the edge
/// you need last when navigating forward (or first when navigating backward).
#[inline]
pub fn last_leaf_edge(self) -> Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge> {
let mut node = self;
loop {
match node.force() {
Leaf(leaf) => return leaf.last_edge(),
Internal(internal) => node = internal.last_edge().descend(),
}
}
}
}
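// Illustrative sketch, not part of the original module: `first_leaf_edge` and
// `last_leaf_edge` are the entry points for iteration. Forward traversal over an
// immutably borrowed tree looks roughly like this (`visit` is a placeholder;
// immutable handles are `Copy`, so `kv` can be reused after `into_kv`):
//
//     let mut edge = root.node_as_ref().first_leaf_edge();
//     while let Ok(kv) = edge.next_kv() {
//         visit(kv.into_kv());
//         edge = kv.next_leaf_edge();
//     }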
pub enum Position<BorrowType, K, V> {
Leaf(NodeRef<BorrowType, K, V, marker::Leaf>),
Internal(NodeRef<BorrowType, K, V, marker::Internal>),
InternalKV(Handle<NodeRef<BorrowType, K, V, marker::Internal>, marker::KV>),
}
impl<'a, K: 'a, V: 'a> NodeRef<marker::Immut<'a>, K, V, marker::LeafOrInternal> {
/// Visits leaf nodes and internal KVs in order of ascending keys, and also
/// visits internal nodes as a whole in a depth first order, meaning that
/// internal nodes precede their individual KVs and their child nodes.
pub fn visit_nodes_in_order<F>(self, mut visit: F)
where
F: FnMut(Position<marker::Immut<'a>, K, V>),
{
match self.force() {
Leaf(leaf) => visit(Position::Leaf(leaf)),
Internal(internal) => {
visit(Position::Internal(internal));
let mut edge = internal.first_edge();
loop {
edge = match edge.descend().force() {
Leaf(leaf) => {
visit(Position::Leaf(leaf));
match edge.next_kv() {
Ok(kv) => {
visit(Position::InternalKV(kv));
kv.right_edge()
}
Err(_) => return,
}
}
Internal(internal) => {
visit(Position::Internal(internal));
internal.first_edge()
}
}
}
}
}
}
/// Calculates the number of elements in a (sub)tree.
pub fn calc_length(self) -> usize {
let mut result = 0;
self.visit_nodes_in_order(|pos| match pos {
Position::Leaf(node) => result += node.len(),
Position::Internal(node) => result += node.len(),
Position::InternalKV(_) => (),
});
result
}
}
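// Note on cost: `visit_nodes_in_order` walks every node, so `calc_length` is O(n)
// in the number of elements; it is meant for rare whole-tree recounts (e.g. a
// hypothetical caller such as `split_off` fixing up lengths), not per-operation use.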
impl<BorrowType, K, V> Handle<NodeRef<BorrowType, K, V, marker::LeafOrInternal>, marker::KV> {
/// Returns the leaf edge closest to a KV for forward navigation.
pub fn next_leaf_edge(self) -> Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge> {
match self.force() {
Leaf(leaf_kv) => leaf_kv.right_edge(),
Internal(internal_kv) => {
let next_internal_edge = internal_kv.right_edge();
next_internal_edge.descend().first_leaf_edge()
}
}
}
/// Returns the leaf edge closest to a KV for backward navigation.
pub fn next_back_leaf_edge(
self,
) -> Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge> {
match self.force() {
Leaf(leaf_kv) => leaf_kv.left_edge(),
Internal(internal_kv) => {
let next_internal_edge = internal_kv.left_edge();
next_internal_edge.descend().last_leaf_edge()
}
}
}
}

View File

@ -43,6 +43,9 @@ use crate::boxed::Box;
const B: usize = 6;
pub const MIN_LEN: usize = B - 1;
pub const CAPACITY: usize = 2 * B - 1;
const KV_IDX_CENTER: usize = B - 1;
const EDGE_IDX_LEFT_OF_CENTER: usize = B - 1;
const EDGE_IDX_RIGHT_OF_CENTER: usize = B;
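// With the B = 6 defined above, these evaluate to: MIN_LEN = 5, CAPACITY = 11,
// KV_IDX_CENTER = 5, EDGE_IDX_LEFT_OF_CENTER = 5 and EDGE_IDX_RIGHT_OF_CENTER = 6.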
/// The underlying representation of leaf nodes.
#[repr(C)]
@ -94,7 +97,8 @@ struct InternalNode<K, V> {
data: LeafNode<K, V>,
/// The pointers to the children of this node. `len + 1` of these are considered
/// initialized and valid, although during the process of `into_iter` or `drop`,
/// some pointers are dangling while others still need to be traversed.
edges: [MaybeUninit<BoxedNode<K, V>>; 2 * B],
}
@ -152,12 +156,18 @@ unsafe impl<K: Sync, V: Sync> Sync for Root<K, V> {}
unsafe impl<K: Send, V: Send> Send for Root<K, V> {}
impl<K, V> Root<K, V> {
/// Returns the number of levels below the root.
pub fn height(&self) -> usize {
self.height
}
/// Returns a new owned tree, with its own root node that is initially empty.
pub fn new_leaf() -> Self {
Root { node: BoxedNode::from_leaf(Box::new(unsafe { LeafNode::new() })), height: 0 }
}
/// Borrows and returns an immutable reference to the node owned by the root.
pub fn node_as_ref(&self) -> NodeRef<marker::Immut<'_>, K, V, marker::LeafOrInternal> {
NodeRef {
height: self.height,
node: self.node.as_ptr(),
@ -166,7 +176,8 @@ impl<K, V> Root<K, V> {
}
}
/// Borrows and returns a mutable reference to the node owned by the root.
pub fn node_as_mut(&mut self) -> NodeRef<marker::Mut<'_>, K, V, marker::LeafOrInternal> {
NodeRef {
height: self.height,
node: self.node.as_ptr(),
@ -185,8 +196,9 @@ impl<K, V> Root<K, V> {
}
/// Adds a new internal node with a single edge, pointing to the previous root, and makes that
/// new node the root. This increases the height by 1 and is the opposite of
/// `pop_internal_level`.
pub fn push_internal_level(&mut self) -> NodeRef<marker::Mut<'_>, K, V, marker::Internal> {
let mut new_node = Box::new(unsafe { InternalNode::new() });
new_node.edges[0].write(unsafe { BoxedNode::from_ptr(self.node.as_ptr()) });
@ -207,23 +219,24 @@ impl<K, V> Root<K, V> {
ret
}
/// Removes the internal root node, using its first child as the new root.
/// As it is intended only to be called when the root has only one child,
/// no cleanup is done on any of the other children of the root.
/// This decreases the height by 1 and is the opposite of `push_internal_level`.
/// Panics if there is no internal level, i.e. if the root is a leaf.
pub fn pop_internal_level(&mut self) {
assert!(self.height > 0);
let top = self.node.ptr;
self.node = unsafe {
BoxedNode::from_ptr(
self.node_as_mut().cast_unchecked::<marker::Internal>().first_edge().descend().node,
)
};
self.height -= 1;
unsafe {
(*self.node_as_mut().as_leaf_mut()).parent = ptr::null();
}
unsafe {
@ -299,12 +312,6 @@ impl<BorrowType, K, V, Type> NodeRef<BorrowType, K, V, Type> {
self.height
}
/// Temporarily takes out another, immutable reference to the same node.
fn reborrow(&self) -> NodeRef<marker::Immut<'_>, K, V, Type> {
NodeRef { height: self.height, node: self.node, root: self.root, _marker: PhantomData }
@ -408,8 +415,8 @@ impl<K, V> NodeRef<marker::Owned, K, V, marker::LeafOrInternal> {
impl<'a, K, V, Type> NodeRef<marker::Mut<'a>, K, V, Type> {
/// Unsafely asserts to the compiler some static information about whether this
/// node is a `Leaf` or an `Internal`.
unsafe fn cast_unchecked<NewType>(self) -> NodeRef<marker::Mut<'a>, K, V, NewType> {
NodeRef { height: self.height, node: self.node, root: self.root, _marker: PhantomData }
}
@ -460,12 +467,6 @@ impl<'a, K: 'a, V: 'a, Type> NodeRef<marker::Immut<'a>, K, V, Type> {
fn into_val_slice(self) -> &'a [V] {
unsafe { slice::from_raw_parts(MaybeUninit::first_ptr(&self.as_leaf().vals), self.len()) }
}
}
impl<'a, K: 'a, V: 'a, Type> NodeRef<marker::Mut<'a>, K, V, Type> {
@ -515,7 +516,7 @@ impl<'a, K: 'a, V: 'a, Type> NodeRef<marker::Mut<'a>, K, V, Type> {
}
impl<'a, K, V> NodeRef<marker::Mut<'a>, K, V, marker::Leaf> {
/// Adds a key/value pair to the end of the node.
pub fn push(&mut self, key: K, val: V) {
assert!(self.len() < CAPACITY);
@ -602,8 +603,10 @@ impl<'a, K, V> NodeRef<marker::Mut<'a>, K, V, marker::Internal> {
}
impl<'a, K, V> NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal> {
/// Removes a key/value pair from the end of this node and returns the pair.
/// If this is an internal node, also removes the edge that was to the right
/// of that pair and returns the orphaned node that this edge owned with its
/// parent erased.
pub fn pop(&mut self) -> (K, V, Option<Root<K, V>>) {
assert!(self.len() > 0);
@ -618,7 +621,7 @@ impl<'a, K, V> NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal> {
let edge =
ptr::read(internal.as_internal().edges.get_unchecked(idx + 1).as_ptr());
let mut new_root = Root { node: edge, height: internal.height - 1 };
(*new_root.node_as_mut().as_leaf_mut()).parent = ptr::null();
Some(new_root)
}
};
@ -650,7 +653,7 @@ impl<'a, K, V> NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal> {
);
let mut new_root = Root { node: edge, height: internal.height - 1 };
(*new_root.node_as_mut().as_leaf_mut()).parent = ptr::null();
for i in 0..old_len {
Handle::new_edge(internal.reborrow_mut(), i).correct_parent_link();
@ -721,7 +724,7 @@ impl<Node: Copy, Type> Clone for Handle<Node, Type> {
}
impl<Node, Type> Handle<Node, Type> {
/// Retrieves the node that contains the edge or key/value pair this handle points to.
pub fn into_node(self) -> Node {
self.node
}
@ -821,13 +824,34 @@ impl<BorrowType, K, V, NodeType> Handle<NodeRef<BorrowType, K, V, NodeType>, mar
}
}
enum InsertionPlace {
Left(usize),
Right(usize),
}
/// Given an edge index where we want to insert into a node filled to capacity,
/// computes a sensible KV index of a split point and where to perform the insertion.
/// The goal of the split point is for its key and value to end up in a parent node;
/// the keys, values and edges to the left of the split point become the left child;
/// the keys, values and edges to the right of the split point become the right child.
fn splitpoint(edge_idx: usize) -> (usize, InsertionPlace) {
debug_assert!(edge_idx <= CAPACITY);
// Rust issue #74834 tries to explain these symmetric rules.
match edge_idx {
0..EDGE_IDX_LEFT_OF_CENTER => (KV_IDX_CENTER - 1, InsertionPlace::Left(edge_idx)),
EDGE_IDX_LEFT_OF_CENTER => (KV_IDX_CENTER, InsertionPlace::Left(edge_idx)),
EDGE_IDX_RIGHT_OF_CENTER => (KV_IDX_CENTER, InsertionPlace::Right(0)),
_ => (KV_IDX_CENTER + 1, InsertionPlace::Right(edge_idx - (KV_IDX_CENTER + 1 + 1))),
}
}
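// Worked example (editorial, derived from the constants above): with B = 6,
// CAPACITY = 11 and KV_IDX_CENTER = 5, the four arms give:
//
//     splitpoint(0)  == (4, InsertionPlace::Left(0));
//     splitpoint(5)  == (5, InsertionPlace::Left(5));
//     splitpoint(6)  == (5, InsertionPlace::Right(0));
//     splitpoint(11) == (6, InsertionPlace::Right(4));
//
// In each case, after the split and the insertion both halves hold at least
// MIN_LEN = 5 key/value pairs; `test_splitpoint` below checks this exhaustively.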
impl<'a, K, V, NodeType> Handle<NodeRef<marker::Mut<'a>, K, V, NodeType>, marker::Edge> {
/// Helps implementations of `insert_fit` for a particular `NodeType`,
/// by taking care of leaf data.
fn leafy_insert_fit(&mut self, key: K, val: V) {
// Necessary for correctness, but in a private module
debug_assert!(self.node.len() < CAPACITY);
@ -836,35 +860,49 @@ impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::Edge
slice_insert(self.node.vals_mut(), self.idx, val);
(*self.node.as_leaf_mut()).len += 1;
}
}
}
impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::Edge> {
/// Inserts a new key/value pair between the key/value pairs to the right and left of
/// this edge. This method assumes that there is enough space in the node for the new
/// pair to fit.
///
/// The returned pointer points to the inserted value.
fn insert_fit(&mut self, key: K, val: V) -> *mut V {
self.leafy_insert_fit(key, val);
unsafe { self.node.vals_mut().get_unchecked_mut(self.idx) }
}
}
impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::Edge> {
/// Inserts a new key/value pair between the key/value pairs to the right and left of
/// this edge. This method splits the node if there isn't enough room.
///
/// The returned pointer points to the inserted value.
fn insert(mut self, key: K, val: V) -> (InsertResult<'a, K, V, marker::Leaf>, *mut V) {
if self.node.len() < CAPACITY {
let ptr = self.insert_fit(key, val);
let kv = unsafe { Handle::new_kv(self.node, self.idx) };
(InsertResult::Fit(kv), ptr)
} else {
let (middle_kv_idx, insertion) = splitpoint(self.idx);
let middle = unsafe { Handle::new_kv(self.node, middle_kv_idx) };
let (mut left, k, v, mut right) = middle.split();
let ptr = match insertion {
InsertionPlace::Left(insert_idx) => unsafe {
Handle::new_edge(left.reborrow_mut(), insert_idx).insert_fit(key, val)
},
InsertionPlace::Right(insert_idx) => unsafe {
Handle::new_edge(
right.node_as_mut().cast_unchecked::<marker::Leaf>(),
insert_idx,
)
.insert_fit(key, val)
},
};
(InsertResult::Split(SplitResult { left: left.forget_type(), k, v, right }), ptr)
}
}
}
@ -882,14 +920,6 @@ impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker::
}
}
/// Inserts a new key/value pair and an edge that will go to the right of that new pair
/// between this edge and the key/value pair to the right of this edge. This method assumes
/// that there is enough space in the node for the new pair to fit.
@ -899,8 +929,7 @@ impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker::
debug_assert!(edge.height == self.node.height - 1);
unsafe {
self.leafy_insert_fit(key, val);
slice_insert(
slice::from_raw_parts_mut(
@ -920,7 +949,7 @@ impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker::
/// Inserts a new key/value pair and an edge that will go to the right of that new pair
/// between this edge and the key/value pair to the right of this edge. This method splits
/// the node if there isn't enough room.
fn insert(
mut self,
key: K,
val: V,
@ -933,22 +962,58 @@ impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker::
let kv = unsafe { Handle::new_kv(self.node, self.idx) };
InsertResult::Fit(kv)
} else {
let (middle_kv_idx, insertion) = splitpoint(self.idx);
let middle = unsafe { Handle::new_kv(self.node, middle_kv_idx) };
let (mut left, k, v, mut right) = middle.split();
match insertion {
InsertionPlace::Left(insert_idx) => unsafe {
Handle::new_edge(left.reborrow_mut(), insert_idx).insert_fit(key, val, edge);
},
InsertionPlace::Right(insert_idx) => unsafe {
Handle::new_edge(
right.node_as_mut().cast_unchecked::<marker::Internal>(),
insert_idx,
)
.insert_fit(key, val, edge);
},
}
InsertResult::Split(SplitResult { left: left.forget_type(), k, v, right })
}
}
}
impl<'a, K: 'a, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::Edge> {
/// Inserts a new key/value pair between the key/value pairs to the right and left of
/// this edge. This method splits the node if there isn't enough room, and tries to
/// insert the split off portion into the parent node recursively, until the root is reached.
///
/// If the returned result is a `Fit`, its handle's node can be this edge's node or an ancestor.
/// If the returned result is a `Split`, the `left` field will be the root node.
/// The returned pointer points to the inserted value.
pub fn insert_recursing(
self,
key: K,
value: V,
) -> (InsertResult<'a, K, V, marker::LeafOrInternal>, *mut V) {
let (mut split, val_ptr) = match self.insert(key, value) {
(InsertResult::Fit(handle), ptr) => {
return (InsertResult::Fit(handle.forget_node_type()), ptr);
}
(InsertResult::Split(split), val_ptr) => (split, val_ptr),
};
loop {
split = match split.left.ascend() {
Ok(parent) => match parent.insert(split.k, split.v, split.right) {
InsertResult::Fit(handle) => {
return (InsertResult::Fit(handle.forget_node_type()), val_ptr);
}
InsertResult::Split(split) => split,
},
Err(root) => {
return (InsertResult::Split(SplitResult { left: root, ..split }), val_ptr);
}
};
}
}
}
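// Illustrative sketch, not part of the original module: a caller holding the
// `Root` (here a hypothetical `root` variable) finishes the job when the split
// bubbles all the way up, roughly:
//
//     match leaf_edge.insert_recursing(key, value) {
//         (InsertResult::Fit(_), val_ptr) => val_ptr,
//         (InsertResult::Split(SplitResult { k, v, right, .. }), val_ptr) => {
//             // The root itself overflowed: grow the tree by one level and
//             // attach the split-off right half under the new root.
//             root.push_internal_level().push(k, v, right);
//             val_ptr
//         }
//     }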
@ -972,14 +1037,23 @@ impl<BorrowType, K, V> Handle<NodeRef<BorrowType, K, V, marker::Internal>, marke
impl<'a, K: 'a, V: 'a, NodeType> Handle<NodeRef<marker::Immut<'a>, K, V, NodeType>, marker::KV> {
pub fn into_kv(self) -> (&'a K, &'a V) {
let keys = self.node.into_key_slice();
let vals = self.node.into_val_slice();
unsafe { (keys.get_unchecked(self.idx), vals.get_unchecked(self.idx)) }
}
}
impl<'a, K: 'a, V: 'a, NodeType> Handle<NodeRef<marker::Mut<'a>, K, V, NodeType>, marker::KV> {
pub fn into_key_mut(self) -> &'a mut K {
let keys = self.node.into_key_slice_mut();
unsafe { keys.get_unchecked_mut(self.idx) }
}
pub fn into_val_mut(self) -> &'a mut V {
let vals = self.node.into_val_slice_mut();
unsafe { vals.get_unchecked_mut(self.idx) }
}
pub fn into_kv_mut(self) -> (&'a mut K, &'a mut V) {
unsafe {
let (keys, vals) = self.node.into_slices_mut();
@ -997,18 +1071,11 @@ impl<'a, K, V, NodeType> Handle<NodeRef<marker::Mut<'a>, K, V, NodeType>, marker
}
}
impl<'a, K, V, NodeType> Handle<NodeRef<marker::Mut<'a>, K, V, NodeType>, marker::KV> {
/// Helps implementations of `split` for a particular `NodeType`,
/// by taking care of leaf data.
fn leafy_split(&mut self, new_node: &mut LeafNode<K, V>) -> (K, V, usize) {
unsafe {
let k = ptr::read(self.node.keys().get_unchecked(self.idx));
let v = ptr::read(self.node.vals().get_unchecked(self.idx));
@ -1027,21 +1094,39 @@ impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::KV>
(*self.node.as_leaf_mut()).len = self.idx as u16;
new_node.len = new_len as u16;
(k, v, new_len)
}
}
}
impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::KV> {
/// Splits the underlying node into three parts:
///
/// - The node is truncated to only contain the key/value pairs to the left of
///   this handle.
/// - The key and value pointed to by this handle are extracted.
/// - All the key/value pairs to the right of this handle are put into a newly
/// allocated node.
pub fn split(mut self) -> (NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, K, V, Root<K, V>) {
unsafe {
let mut new_node = Box::new(LeafNode::new());
let (k, v, _) = self.leafy_split(&mut new_node);
(self.node, k, v, Root { node: BoxedNode::from_leaf(new_node), height: 0 })
}
}
/// Removes the key/value pair pointed to by this handle and returns it, along with the edge
/// that the key/value pair collapsed into.
pub fn remove(
mut self,
) -> ((K, V), Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::Edge>) {
unsafe {
let k = slice_remove(self.node.keys_mut(), self.idx);
let v = slice_remove(self.node.vals_mut(), self.idx);
(*self.node.as_leaf_mut()).len -= 1;
((k, v), self.left_edge())
}
}
}
@ -1058,35 +1143,19 @@ impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker::
unsafe {
let mut new_node = Box::new(InternalNode::new());
let (k, v, new_len) = self.leafy_split(&mut new_node.data);
let height = self.node.height;
ptr::copy_nonoverlapping(
self.node.as_internal().edges.as_ptr().add(self.idx + 1),
new_node.edges.as_mut_ptr(),
new_len + 1,
);
let mut new_root = Root { node: BoxedNode::from_internal(new_node), height };
for i in 0..(new_len + 1) {
Handle::new_edge(new_root.node_as_mut().cast_unchecked(), i).correct_parent_link();
}
(self.node, k, v, new_root)
@ -1107,7 +1176,7 @@ impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker::
/// to by this handle, and the node immediately to the right of this handle into one new
/// child of the underlying node, returning an edge referencing that new child.
///
/// Panics unless this edge `.can_merge()`.
pub fn merge(
mut self,
) -> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker::Edge> {
@ -1115,10 +1184,9 @@ impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker::
let self2 = unsafe { ptr::read(&self) };
let mut left_node = self1.left_edge().descend();
let left_len = left_node.len();
let right_node = self2.right_edge().descend();
let right_len = right_node.len();
// necessary for correctness, but in a private module
assert!(left_len + right_len < CAPACITY);
unsafe {
@ -1149,28 +1217,25 @@ impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker::
(*left_node.as_leaf_mut()).len += right_len as u16 + 1;
if self.node.height > 1 {
// SAFETY: the height of the nodes being merged is one below the height
// of the node of this edge, thus above zero, so they are internal.
let mut left_node = left_node.cast_unchecked();
let right_node = right_node.cast_unchecked();
ptr::copy_nonoverlapping(
right_node.reborrow().as_internal().edges.as_ptr(),
left_node.reborrow_mut().as_internal_mut().edges.as_mut_ptr().add(left_len + 1),
right_len + 1,
);
for i in left_len + 1..left_len + right_len + 2 {
Handle::new_edge(left_node.reborrow_mut(), i).correct_parent_link();
}
Global.dealloc(right_node.node.cast(), Layout::new::<InternalNode<K, V>>());
} else {
Global.dealloc(right_node.node.cast(), Layout::new::<LeafNode<K, V>>());
}
Handle::new_edge(self.node, self.idx)
}
@ -1183,8 +1248,8 @@ impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker::
unsafe {
let (k, v, edge) = self.reborrow_mut().left_edge().descend().pop();
let k = mem::replace(self.kv_mut().0, k);
let v = mem::replace(self.kv_mut().1, v);
match self.reborrow_mut().right_edge().descend().force() {
ForceResult::Leaf(mut leaf) => leaf.push_front(k, v),
@ -1200,8 +1265,8 @@ impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker::
unsafe {
let (k, v, edge) = self.reborrow_mut().right_edge().descend().pop_front();
let k = mem::replace(self.kv_mut().0, k);
let v = mem::replace(self.kv_mut().1, v);
match self.reborrow_mut().left_edge().descend().force() {
ForceResult::Leaf(mut leaf) => leaf.push(k, v),
@ -1229,7 +1294,7 @@ impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker::
let left_kv = left_node.reborrow_mut().into_kv_pointers_mut();
let right_kv = right_node.reborrow_mut().into_kv_pointers_mut();
let parent_kv = {
let kv = self.kv_mut();
(kv.0 as *mut K, kv.1 as *mut V)
};
@ -1286,7 +1351,7 @@ impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker::
let left_kv = left_node.reborrow_mut().into_kv_pointers_mut();
let right_kv = right_node.reborrow_mut().into_kv_pointers_mut();
let parent_kv = {
let kv = self.kv_mut();
(kv.0 as *mut K, kv.1 as *mut V)
};
@ -1354,6 +1419,20 @@ unsafe fn move_edges<K, V>(
}
}
impl<BorrowType, K, V> NodeRef<BorrowType, K, V, marker::Leaf> {
/// Removes any static information asserting that this node is a `Leaf` node.
pub fn forget_type(self) -> NodeRef<BorrowType, K, V, marker::LeafOrInternal> {
NodeRef { height: self.height, node: self.node, root: self.root, _marker: PhantomData }
}
}
impl<BorrowType, K, V> NodeRef<BorrowType, K, V, marker::Internal> {
/// Removes any static information asserting that this node is an `Internal` node.
pub fn forget_type(self) -> NodeRef<BorrowType, K, V, marker::LeafOrInternal> {
NodeRef { height: self.height, node: self.node, root: self.root, _marker: PhantomData }
}
}
impl<BorrowType, K, V> Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge> {
pub fn forget_node_type(
self,
@ -1378,6 +1457,14 @@ impl<BorrowType, K, V> Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::K
}
}
impl<BorrowType, K, V> Handle<NodeRef<BorrowType, K, V, marker::Internal>, marker::KV> {
pub fn forget_node_type(
self,
) -> Handle<NodeRef<BorrowType, K, V, marker::LeafOrInternal>, marker::KV> {
unsafe { Handle::new_kv(self.node.forget_type(), self.idx) }
}
}
impl<BorrowType, K, V, HandleType>
Handle<NodeRef<BorrowType, K, V, marker::LeafOrInternal>, HandleType>
{
@ -1444,9 +1531,21 @@ pub enum ForceResult<Leaf, Internal> {
Internal(Internal),
}
/// Result of insertion, when a node needed to expand beyond its capacity.
/// Does not distinguish between `Leaf` and `Internal` because `Root` doesn't.
pub struct SplitResult<'a, K, V> {
// Altered node in existing tree with elements and edges that belong to the left of `k`.
pub left: NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal>,
// Some key and value split off, to be inserted elsewhere.
pub k: K,
pub v: V,
// Owned, unattached, new node with elements and edges that belong to the right of `k`.
pub right: Root<K, V>,
}
pub enum InsertResult<'a, K, V, Type> {
Fit(Handle<NodeRef<marker::Mut<'a>, K, V, Type>, marker::KV>),
Split(SplitResult<'a, K, V>),
}
pub mod marker {
@ -1478,3 +1577,6 @@ unsafe fn slice_remove<T>(slice: &mut [T], idx: usize) -> T {
ret
}
}
#[cfg(test)]
mod tests;

View File

@ -0,0 +1,25 @@
use super::*;
#[test]
fn test_splitpoint() {
for idx in 0..=CAPACITY {
let (middle_kv_idx, insertion) = splitpoint(idx);
// Simulate performing the split:
let mut left_len = middle_kv_idx;
let mut right_len = CAPACITY - middle_kv_idx - 1;
match insertion {
InsertionPlace::Left(edge_idx) => {
assert!(edge_idx <= left_len);
left_len += 1;
}
InsertionPlace::Right(edge_idx) => {
assert!(edge_idx <= right_len);
right_len += 1;
}
}
assert!(left_len >= MIN_LEN);
assert!(right_len >= MIN_LEN);
assert!(left_len + right_len == CAPACITY);
}
}

File diff suppressed because it is too large

View File

@ -0,0 +1,649 @@
use crate::collections::BTreeSet;
use crate::vec::Vec;
use std::iter::FromIterator;
use std::panic::{catch_unwind, AssertUnwindSafe};
use std::sync::atomic::{AtomicU32, Ordering};
use super::super::DeterministicRng;
#[test]
fn test_clone_eq() {
let mut m = BTreeSet::new();
m.insert(1);
m.insert(2);
assert_eq!(m.clone(), m);
}
#[test]
fn test_iter_min_max() {
let mut a = BTreeSet::new();
assert_eq!(a.iter().min(), None);
assert_eq!(a.iter().max(), None);
assert_eq!(a.range(..).min(), None);
assert_eq!(a.range(..).max(), None);
assert_eq!(a.difference(&BTreeSet::new()).min(), None);
assert_eq!(a.difference(&BTreeSet::new()).max(), None);
assert_eq!(a.intersection(&a).min(), None);
assert_eq!(a.intersection(&a).max(), None);
assert_eq!(a.symmetric_difference(&BTreeSet::new()).min(), None);
assert_eq!(a.symmetric_difference(&BTreeSet::new()).max(), None);
assert_eq!(a.union(&a).min(), None);
assert_eq!(a.union(&a).max(), None);
a.insert(1);
a.insert(2);
assert_eq!(a.iter().min(), Some(&1));
assert_eq!(a.iter().max(), Some(&2));
assert_eq!(a.range(..).min(), Some(&1));
assert_eq!(a.range(..).max(), Some(&2));
assert_eq!(a.difference(&BTreeSet::new()).min(), Some(&1));
assert_eq!(a.difference(&BTreeSet::new()).max(), Some(&2));
assert_eq!(a.intersection(&a).min(), Some(&1));
assert_eq!(a.intersection(&a).max(), Some(&2));
assert_eq!(a.symmetric_difference(&BTreeSet::new()).min(), Some(&1));
assert_eq!(a.symmetric_difference(&BTreeSet::new()).max(), Some(&2));
assert_eq!(a.union(&a).min(), Some(&1));
assert_eq!(a.union(&a).max(), Some(&2));
}
fn check<F>(a: &[i32], b: &[i32], expected: &[i32], f: F)
where
F: FnOnce(&BTreeSet<i32>, &BTreeSet<i32>, &mut dyn FnMut(&i32) -> bool) -> bool,
{
let mut set_a = BTreeSet::new();
let mut set_b = BTreeSet::new();
for x in a {
assert!(set_a.insert(*x))
}
for y in b {
assert!(set_b.insert(*y))
}
let mut i = 0;
f(&set_a, &set_b, &mut |&x| {
if i < expected.len() {
assert_eq!(x, expected[i]);
}
i += 1;
true
});
assert_eq!(i, expected.len());
}
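// Editorial note: `check` drives each set operation through its callback form;
// the closure counts the yielded elements and compares them, in order, against
// `expected`, so both the contents and the iteration order are verified.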
#[test]
fn test_intersection() {
fn check_intersection(a: &[i32], b: &[i32], expected: &[i32]) {
check(a, b, expected, |x, y, f| x.intersection(y).all(f))
}
check_intersection(&[], &[], &[]);
check_intersection(&[1, 2, 3], &[], &[]);
check_intersection(&[], &[1, 2, 3], &[]);
check_intersection(&[2], &[1, 2, 3], &[2]);
check_intersection(&[1, 2, 3], &[2], &[2]);
check_intersection(&[11, 1, 3, 77, 103, 5, -5], &[2, 11, 77, -9, -42, 5, 3], &[3, 5, 11, 77]);
if cfg!(miri) {
// Miri is too slow
return;
}
let large = (0..100).collect::<Vec<_>>();
check_intersection(&[], &large, &[]);
check_intersection(&large, &[], &[]);
check_intersection(&[-1], &large, &[]);
check_intersection(&large, &[-1], &[]);
check_intersection(&[0], &large, &[0]);
check_intersection(&large, &[0], &[0]);
check_intersection(&[99], &large, &[99]);
check_intersection(&large, &[99], &[99]);
check_intersection(&[100], &large, &[]);
check_intersection(&large, &[100], &[]);
check_intersection(&[11, 5000, 1, 3, 77, 8924], &large, &[1, 3, 11, 77]);
}
#[test]
fn test_intersection_size_hint() {
let x: BTreeSet<i32> = [3, 4].iter().copied().collect();
let y: BTreeSet<i32> = [1, 2, 3].iter().copied().collect();
let mut iter = x.intersection(&y);
assert_eq!(iter.size_hint(), (1, Some(1)));
assert_eq!(iter.next(), Some(&3));
assert_eq!(iter.size_hint(), (0, Some(0)));
assert_eq!(iter.next(), None);
iter = y.intersection(&y);
assert_eq!(iter.size_hint(), (0, Some(3)));
assert_eq!(iter.next(), Some(&1));
assert_eq!(iter.size_hint(), (0, Some(2)));
}
#[test]
fn test_difference() {
fn check_difference(a: &[i32], b: &[i32], expected: &[i32]) {
check(a, b, expected, |x, y, f| x.difference(y).all(f))
}
check_difference(&[], &[], &[]);
check_difference(&[1, 12], &[], &[1, 12]);
check_difference(&[], &[1, 2, 3, 9], &[]);
check_difference(&[1, 3, 5, 9, 11], &[3, 9], &[1, 5, 11]);
check_difference(&[1, 3, 5, 9, 11], &[3, 6, 9], &[1, 5, 11]);
check_difference(&[1, 3, 5, 9, 11], &[0, 1], &[3, 5, 9, 11]);
check_difference(&[1, 3, 5, 9, 11], &[11, 12], &[1, 3, 5, 9]);
check_difference(
&[-5, 11, 22, 33, 40, 42],
&[-12, -5, 14, 23, 34, 38, 39, 50],
&[11, 22, 33, 40, 42],
);
if cfg!(miri) {
// Miri is too slow
return;
}
let large = (0..100).collect::<Vec<_>>();
check_difference(&[], &large, &[]);
check_difference(&[-1], &large, &[-1]);
check_difference(&[0], &large, &[]);
check_difference(&[99], &large, &[]);
check_difference(&[100], &large, &[100]);
check_difference(&[11, 5000, 1, 3, 77, 8924], &large, &[5000, 8924]);
check_difference(&large, &[], &large);
check_difference(&large, &[-1], &large);
check_difference(&large, &[100], &large);
}
#[test]
fn test_difference_size_hint() {
let s246: BTreeSet<i32> = [2, 4, 6].iter().copied().collect();
let s23456: BTreeSet<i32> = (2..=6).collect();
let mut iter = s246.difference(&s23456);
assert_eq!(iter.size_hint(), (0, Some(3)));
assert_eq!(iter.next(), None);
let s12345: BTreeSet<i32> = (1..=5).collect();
iter = s246.difference(&s12345);
assert_eq!(iter.size_hint(), (0, Some(3)));
assert_eq!(iter.next(), Some(&6));
assert_eq!(iter.size_hint(), (0, Some(0)));
assert_eq!(iter.next(), None);
let s34567: BTreeSet<i32> = (3..=7).collect();
iter = s246.difference(&s34567);
assert_eq!(iter.size_hint(), (0, Some(3)));
assert_eq!(iter.next(), Some(&2));
assert_eq!(iter.size_hint(), (0, Some(2)));
assert_eq!(iter.next(), None);
let s1: BTreeSet<i32> = (-9..=1).collect();
iter = s246.difference(&s1);
assert_eq!(iter.size_hint(), (3, Some(3)));
let s2: BTreeSet<i32> = (-9..=2).collect();
iter = s246.difference(&s2);
assert_eq!(iter.size_hint(), (2, Some(2)));
assert_eq!(iter.next(), Some(&4));
assert_eq!(iter.size_hint(), (1, Some(1)));
let s23: BTreeSet<i32> = (2..=3).collect();
iter = s246.difference(&s23);
assert_eq!(iter.size_hint(), (1, Some(3)));
assert_eq!(iter.next(), Some(&4));
assert_eq!(iter.size_hint(), (1, Some(1)));
let s4: BTreeSet<i32> = (4..=4).collect();
iter = s246.difference(&s4);
assert_eq!(iter.size_hint(), (2, Some(3)));
assert_eq!(iter.next(), Some(&2));
assert_eq!(iter.size_hint(), (1, Some(2)));
assert_eq!(iter.next(), Some(&6));
assert_eq!(iter.size_hint(), (0, Some(0)));
assert_eq!(iter.next(), None);
let s56: BTreeSet<i32> = (5..=6).collect();
iter = s246.difference(&s56);
assert_eq!(iter.size_hint(), (1, Some(3)));
assert_eq!(iter.next(), Some(&2));
assert_eq!(iter.size_hint(), (0, Some(2)));
let s6: BTreeSet<i32> = (6..=19).collect();
iter = s246.difference(&s6);
assert_eq!(iter.size_hint(), (2, Some(2)));
assert_eq!(iter.next(), Some(&2));
assert_eq!(iter.size_hint(), (1, Some(1)));
let s7: BTreeSet<i32> = (7..=19).collect();
iter = s246.difference(&s7);
assert_eq!(iter.size_hint(), (3, Some(3)));
}
#[test]
fn test_symmetric_difference() {
fn check_symmetric_difference(a: &[i32], b: &[i32], expected: &[i32]) {
check(a, b, expected, |x, y, f| x.symmetric_difference(y).all(f))
}
check_symmetric_difference(&[], &[], &[]);
check_symmetric_difference(&[1, 2, 3], &[2], &[1, 3]);
check_symmetric_difference(&[2], &[1, 2, 3], &[1, 3]);
check_symmetric_difference(&[1, 3, 5, 9, 11], &[-2, 3, 9, 14, 22], &[-2, 1, 5, 11, 14, 22]);
}
#[test]
fn test_symmetric_difference_size_hint() {
let x: BTreeSet<i32> = [2, 4].iter().copied().collect();
let y: BTreeSet<i32> = [1, 2, 3].iter().copied().collect();
let mut iter = x.symmetric_difference(&y);
assert_eq!(iter.size_hint(), (0, Some(5)));
assert_eq!(iter.next(), Some(&1));
assert_eq!(iter.size_hint(), (0, Some(4)));
assert_eq!(iter.next(), Some(&3));
assert_eq!(iter.size_hint(), (0, Some(1)));
}
#[test]
fn test_union() {
fn check_union(a: &[i32], b: &[i32], expected: &[i32]) {
check(a, b, expected, |x, y, f| x.union(y).all(f))
}
check_union(&[], &[], &[]);
check_union(&[1, 2, 3], &[2], &[1, 2, 3]);
check_union(&[2], &[1, 2, 3], &[1, 2, 3]);
check_union(
&[1, 3, 5, 9, 11, 16, 19, 24],
&[-2, 1, 5, 9, 13, 19],
&[-2, 1, 3, 5, 9, 11, 13, 16, 19, 24],
);
}
#[test]
fn test_union_size_hint() {
let x: BTreeSet<i32> = [2, 4].iter().copied().collect();
let y: BTreeSet<i32> = [1, 2, 3].iter().copied().collect();
let mut iter = x.union(&y);
assert_eq!(iter.size_hint(), (3, Some(5)));
assert_eq!(iter.next(), Some(&1));
assert_eq!(iter.size_hint(), (2, Some(4)));
assert_eq!(iter.next(), Some(&2));
assert_eq!(iter.size_hint(), (1, Some(2)));
}
#[test]
// Only tests the simple function definition with respect to intersection
fn test_is_disjoint() {
let one = [1].iter().collect::<BTreeSet<_>>();
let two = [2].iter().collect::<BTreeSet<_>>();
assert!(one.is_disjoint(&two));
}
#[test]
// Also implicitly tests the trivial function definition of is_superset
fn test_is_subset() {
fn is_subset(a: &[i32], b: &[i32]) -> bool {
let set_a = a.iter().collect::<BTreeSet<_>>();
let set_b = b.iter().collect::<BTreeSet<_>>();
set_a.is_subset(&set_b)
}
assert_eq!(is_subset(&[], &[]), true);
assert_eq!(is_subset(&[], &[1, 2]), true);
assert_eq!(is_subset(&[0], &[1, 2]), false);
assert_eq!(is_subset(&[1], &[1, 2]), true);
assert_eq!(is_subset(&[2], &[1, 2]), true);
assert_eq!(is_subset(&[3], &[1, 2]), false);
assert_eq!(is_subset(&[1, 2], &[1]), false);
assert_eq!(is_subset(&[1, 2], &[1, 2]), true);
assert_eq!(is_subset(&[1, 2], &[2, 3]), false);
assert_eq!(
is_subset(&[-5, 11, 22, 33, 40, 42], &[-12, -5, 11, 14, 22, 23, 33, 34, 38, 39, 40, 42]),
true
);
assert_eq!(is_subset(&[-5, 11, 22, 33, 40, 42], &[-12, -5, 11, 14, 22, 23, 34, 38]), false);
if cfg!(miri) {
// Miri is too slow
return;
}
let large = (0..100).collect::<Vec<_>>();
assert_eq!(is_subset(&[], &large), true);
assert_eq!(is_subset(&large, &[]), false);
assert_eq!(is_subset(&[-1], &large), false);
assert_eq!(is_subset(&[0], &large), true);
assert_eq!(is_subset(&[1, 2], &large), true);
assert_eq!(is_subset(&[99, 100], &large), false);
}
#[test]
fn test_drain_filter() {
let mut x: BTreeSet<_> = [1].iter().copied().collect();
let mut y: BTreeSet<_> = [1].iter().copied().collect();
x.drain_filter(|_| true);
y.drain_filter(|_| false);
assert_eq!(x.len(), 0);
assert_eq!(y.len(), 1);
}
#[test]
fn test_drain_filter_drop_panic_leak() {
static PREDS: AtomicU32 = AtomicU32::new(0);
static DROPS: AtomicU32 = AtomicU32::new(0);
#[derive(PartialEq, Eq, PartialOrd, Ord)]
struct D(i32);
impl Drop for D {
fn drop(&mut self) {
if DROPS.fetch_add(1, Ordering::SeqCst) == 1 {
panic!("panic in `drop`");
}
}
}
let mut set = BTreeSet::new();
set.insert(D(0));
set.insert(D(4));
set.insert(D(8));
catch_unwind(move || {
drop(set.drain_filter(|d| {
PREDS.fetch_add(1u32 << d.0, Ordering::SeqCst);
true
}))
})
.ok();
assert_eq!(PREDS.load(Ordering::SeqCst), 0x011);
assert_eq!(DROPS.load(Ordering::SeqCst), 3);
}
#[test]
fn test_drain_filter_pred_panic_leak() {
static PREDS: AtomicU32 = AtomicU32::new(0);
static DROPS: AtomicU32 = AtomicU32::new(0);
#[derive(PartialEq, Eq, PartialOrd, Ord)]
struct D(i32);
impl Drop for D {
fn drop(&mut self) {
DROPS.fetch_add(1, Ordering::SeqCst);
}
}
let mut set = BTreeSet::new();
set.insert(D(0));
set.insert(D(4));
set.insert(D(8));
catch_unwind(AssertUnwindSafe(|| {
drop(set.drain_filter(|d| {
PREDS.fetch_add(1u32 << d.0, Ordering::SeqCst);
match d.0 {
0 => true,
_ => panic!(),
}
}))
}))
.ok();
assert_eq!(PREDS.load(Ordering::SeqCst), 0x011);
assert_eq!(DROPS.load(Ordering::SeqCst), 1);
assert_eq!(set.len(), 2);
assert_eq!(set.first().unwrap().0, 4);
assert_eq!(set.last().unwrap().0, 8);
}
#[test]
fn test_clear() {
let mut x = BTreeSet::new();
x.insert(1);
x.clear();
assert!(x.is_empty());
}
#[test]
fn test_zip() {
let mut x = BTreeSet::new();
x.insert(5);
x.insert(12);
x.insert(11);
let mut y = BTreeSet::new();
y.insert("foo");
y.insert("bar");
let x = x;
let y = y;
let mut z = x.iter().zip(&y);
assert_eq!(z.next().unwrap(), (&5, &("bar")));
assert_eq!(z.next().unwrap(), (&11, &("foo")));
assert!(z.next().is_none());
}
#[test]
fn test_from_iter() {
let xs = [1, 2, 3, 4, 5, 6, 7, 8, 9];
let set: BTreeSet<_> = xs.iter().cloned().collect();
for x in &xs {
assert!(set.contains(x));
}
}
#[test]
fn test_show() {
let mut set = BTreeSet::new();
let empty = BTreeSet::<i32>::new();
set.insert(1);
set.insert(2);
let set_str = format!("{:?}", set);
assert_eq!(set_str, "{1, 2}");
assert_eq!(format!("{:?}", empty), "{}");
}
#[test]
fn test_extend_ref() {
let mut a = BTreeSet::new();
a.insert(1);
a.extend(&[2, 3, 4]);
assert_eq!(a.len(), 4);
assert!(a.contains(&1));
assert!(a.contains(&2));
assert!(a.contains(&3));
assert!(a.contains(&4));
let mut b = BTreeSet::new();
b.insert(5);
b.insert(6);
a.extend(&b);
assert_eq!(a.len(), 6);
assert!(a.contains(&1));
assert!(a.contains(&2));
assert!(a.contains(&3));
assert!(a.contains(&4));
assert!(a.contains(&5));
assert!(a.contains(&6));
}
#[test]
fn test_recovery() {
use std::cmp::Ordering;
#[derive(Debug)]
struct Foo(&'static str, i32);
impl PartialEq for Foo {
fn eq(&self, other: &Self) -> bool {
self.0 == other.0
}
}
impl Eq for Foo {}
impl PartialOrd for Foo {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
self.0.partial_cmp(&other.0)
}
}
impl Ord for Foo {
fn cmp(&self, other: &Self) -> Ordering {
self.0.cmp(&other.0)
}
}
let mut s = BTreeSet::new();
assert_eq!(s.replace(Foo("a", 1)), None);
assert_eq!(s.len(), 1);
assert_eq!(s.replace(Foo("a", 2)), Some(Foo("a", 1)));
assert_eq!(s.len(), 1);
{
let mut it = s.iter();
assert_eq!(it.next(), Some(&Foo("a", 2)));
assert_eq!(it.next(), None);
}
assert_eq!(s.get(&Foo("a", 1)), Some(&Foo("a", 2)));
assert_eq!(s.take(&Foo("a", 1)), Some(Foo("a", 2)));
assert_eq!(s.len(), 0);
assert_eq!(s.get(&Foo("a", 1)), None);
assert_eq!(s.take(&Foo("a", 1)), None);
assert_eq!(s.iter().next(), None);
}
#[test]
#[allow(dead_code)]
fn test_variance() {
use std::collections::btree_set::{IntoIter, Iter, Range};
fn set<'new>(v: BTreeSet<&'static str>) -> BTreeSet<&'new str> {
v
}
fn iter<'a, 'new>(v: Iter<'a, &'static str>) -> Iter<'a, &'new str> {
v
}
fn into_iter<'new>(v: IntoIter<&'static str>) -> IntoIter<&'new str> {
v
}
fn range<'a, 'new>(v: Range<'a, &'static str>) -> Range<'a, &'new str> {
v
}
}
#[test]
fn test_append() {
let mut a = BTreeSet::new();
a.insert(1);
a.insert(2);
a.insert(3);
let mut b = BTreeSet::new();
b.insert(3);
b.insert(4);
b.insert(5);
a.append(&mut b);
assert_eq!(a.len(), 5);
assert_eq!(b.len(), 0);
assert_eq!(a.contains(&1), true);
assert_eq!(a.contains(&2), true);
assert_eq!(a.contains(&3), true);
assert_eq!(a.contains(&4), true);
assert_eq!(a.contains(&5), true);
}
#[test]
fn test_first_last() {
let mut a = BTreeSet::new();
assert_eq!(a.first(), None);
assert_eq!(a.last(), None);
a.insert(1);
assert_eq!(a.first(), Some(&1));
assert_eq!(a.last(), Some(&1));
a.insert(2);
assert_eq!(a.first(), Some(&1));
assert_eq!(a.last(), Some(&2));
for i in 3..=12 {
a.insert(i);
}
assert_eq!(a.first(), Some(&1));
assert_eq!(a.last(), Some(&12));
assert_eq!(a.pop_first(), Some(1));
assert_eq!(a.pop_last(), Some(12));
assert_eq!(a.pop_first(), Some(2));
assert_eq!(a.pop_last(), Some(11));
assert_eq!(a.pop_first(), Some(3));
assert_eq!(a.pop_last(), Some(10));
assert_eq!(a.pop_first(), Some(4));
assert_eq!(a.pop_first(), Some(5));
assert_eq!(a.pop_first(), Some(6));
assert_eq!(a.pop_first(), Some(7));
assert_eq!(a.pop_first(), Some(8));
assert_eq!(a.clone().pop_last(), Some(9));
assert_eq!(a.pop_first(), Some(9));
assert_eq!(a.pop_first(), None);
assert_eq!(a.pop_last(), None);
}
fn rand_data(len: usize) -> Vec<u32> {
let mut rng = DeterministicRng::new();
Vec::from_iter((0..len).map(|_| rng.next()))
}
#[test]
fn test_split_off_empty_right() {
let mut data = rand_data(173);
let mut set = BTreeSet::from_iter(data.clone());
let right = set.split_off(&(data.iter().max().unwrap() + 1));
data.sort();
assert!(set.into_iter().eq(data));
assert!(right.into_iter().eq(None));
}
#[test]
fn test_split_off_empty_left() {
let mut data = rand_data(314);
let mut set = BTreeSet::from_iter(data.clone());
let right = set.split_off(data.iter().min().unwrap());
data.sort();
assert!(set.into_iter().eq(None));
assert!(right.into_iter().eq(data));
}
#[test]
fn test_split_off_large_random_sorted() {
// Miri is too slow
let mut data = if cfg!(miri) { rand_data(529) } else { rand_data(1529) };
// special case with maximum height.
data.sort();
let mut set = BTreeSet::from_iter(data.clone());
let key = data[data.len() / 2];
let right = set.split_off(&key);
assert!(set.into_iter().eq(data.clone().into_iter().filter(|x| *x < key)));
assert!(right.into_iter().eq(data.into_iter().filter(|x| *x >= key)));
}

View File

@ -7,8 +7,8 @@
//! array-based containers are generally faster,
//! more memory efficient, and make better use of CPU cache.
//!
//! [`Vec`]: crate::vec::Vec
//! [`VecDeque`]: super::vec_deque::VecDeque
#![stable(feature = "rust1", since = "1.0.0")]
@ -50,11 +50,8 @@ struct Node<T> {
/// An iterator over the elements of a `LinkedList`.
///
/// This `struct` is created by [`LinkedList::iter()`]. See its
/// documentation for more.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Iter<'a, T: 'a> {
head: Option<NonNull<Node<T>>>,
@ -80,11 +77,8 @@ impl<T> Clone for Iter<'_, T> {
/// A mutable iterator over the elements of a `LinkedList`.
///
/// This `struct` is created by [`LinkedList::iter_mut()`]. See its
/// documentation for more.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct IterMut<'a, T: 'a> {
// We do *not* exclusively own the entire list here, references to node's `element`
@ -109,7 +103,6 @@ impl<T: fmt::Debug> fmt::Debug for IterMut<'_, T> {
/// (provided by the `IntoIterator` trait). See its documentation for more.
///
/// [`into_iter`]: struct.LinkedList.html#method.into_iter
#[derive(Clone)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct IntoIter<T> {
@ -404,7 +397,7 @@ impl<T> LinkedList<T> {
/// This reuses all the nodes from `other` and moves them into `self`. After
/// this operation, `other` becomes empty.
///
/// This operation should compute in *O*(1) time and *O*(1) memory.
///
/// # Examples
///
@ -561,7 +554,7 @@ impl<T> LinkedList<T> {
/// Returns `true` if the `LinkedList` is empty.
///
/// This operation should compute in *O*(1) time.
///
/// # Examples
///
@ -582,7 +575,7 @@ impl<T> LinkedList<T> {
/// Returns the length of the `LinkedList`.
///
/// This operation should compute in *O*(1) time.
///
/// # Examples
///
@ -608,7 +601,7 @@ impl<T> LinkedList<T> {
/// Removes all elements from the `LinkedList`.
///
/// This operation should compute in *O*(*n*) time.
///
/// # Examples
///
@ -751,7 +744,7 @@ impl<T> LinkedList<T> {
/// Adds an element first in the list.
///
/// This operation should compute in *O*(1) time.
///
/// # Examples
///
@ -774,7 +767,7 @@ impl<T> LinkedList<T> {
/// Removes the first element and returns it, or `None` if the list is
/// empty.
///
/// This operation should compute in *O*(1) time.
///
/// # Examples
///
@ -797,7 +790,7 @@ impl<T> LinkedList<T> {
/// Appends an element to the back of a list.
///
/// This operation should compute in *O*(1) time.
///
/// # Examples
///
@ -817,7 +810,7 @@ impl<T> LinkedList<T> {
/// Removes the last element from a list and returns it, or `None` if
/// it is empty.
///
/// This operation should compute in *O*(1) time.
///
/// # Examples
///
@ -838,7 +831,7 @@ impl<T> LinkedList<T> {
/// Splits the list into two at the given index. Returns everything after the given index,
/// including the index.
///
/// This operation should compute in *O*(*n*) time.
///
/// # Panics
///
@ -894,7 +887,7 @@ impl<T> LinkedList<T> {
/// Removes the element at the given index and returns it.
///
/// This operation should compute in *O*(*n*) time.
///
/// # Panics
/// Panics if `at >= len`.
@ -1110,32 +1103,17 @@ impl<T> IterMut<'_, T> {
/// Inserts the given element just after the element most recently returned by `.next()`.
/// The inserted element does not appear in the iteration.
///
/// This method will be removed soon.
#[inline]
#[unstable(
feature = "linked_list_extras",
reason = "this is probably better handled by a cursor type -- we'll see",
issue = "27794"
)]
#[rustc_deprecated(
reason = "Deprecated in favor of CursorMut methods. This method will be removed soon.",
since = "1.47.0"
)]
pub fn insert_next(&mut self, element: T) {
match self.head {
// `push_back` is okay with aliasing `element` references
@ -1163,27 +1141,17 @@ impl<T> IterMut<'_, T> {
/// Provides a reference to the next element, without changing the iterator.
///
/// This method will be removed soon.
#[inline]
#[unstable(
feature = "linked_list_extras",
reason = "this is probably better handled by a cursor type -- we'll see",
issue = "27794"
)]
#[rustc_deprecated(
reason = "Deprecated in favor of CursorMut methods. This method will be removed soon.",
since = "1.47.0"
)]
pub fn peek_next(&mut self) -> Option<&mut T> {
if self.len == 0 {
None

View File

@ -0,0 +1,430 @@
use super::*;
use std::thread;
use std::vec::Vec;
use rand::{thread_rng, RngCore};
fn list_from<T: Clone>(v: &[T]) -> LinkedList<T> {
v.iter().cloned().collect()
}
pub fn check_links<T>(list: &LinkedList<T>) {
unsafe {
let mut len = 0;
let mut last_ptr: Option<&Node<T>> = None;
let mut node_ptr: &Node<T>;
match list.head {
None => {
// tail node should also be None.
assert!(list.tail.is_none());
assert_eq!(0, list.len);
return;
}
Some(node) => node_ptr = &*node.as_ptr(),
}
loop {
match (last_ptr, node_ptr.prev) {
(None, None) => {}
(None, _) => panic!("prev link for head"),
(Some(p), Some(pptr)) => {
assert_eq!(p as *const Node<T>, pptr.as_ptr() as *const Node<T>);
}
_ => panic!("prev link is none, not good"),
}
match node_ptr.next {
Some(next) => {
last_ptr = Some(node_ptr);
node_ptr = &*next.as_ptr();
len += 1;
}
None => {
len += 1;
break;
}
}
}
// verify that the tail node points to the last node.
let tail = list.tail.as_ref().expect("some tail node").as_ref();
assert_eq!(tail as *const Node<T>, node_ptr as *const Node<T>);
// check that len matches interior links.
assert_eq!(len, list.len);
}
}
#[test]
fn test_append() {
// Empty to empty
{
let mut m = LinkedList::<i32>::new();
let mut n = LinkedList::new();
m.append(&mut n);
check_links(&m);
assert_eq!(m.len(), 0);
assert_eq!(n.len(), 0);
}
// Non-empty to empty
{
let mut m = LinkedList::new();
let mut n = LinkedList::new();
n.push_back(2);
m.append(&mut n);
check_links(&m);
assert_eq!(m.len(), 1);
assert_eq!(m.pop_back(), Some(2));
assert_eq!(n.len(), 0);
check_links(&m);
}
// Empty to non-empty
{
let mut m = LinkedList::new();
let mut n = LinkedList::new();
m.push_back(2);
m.append(&mut n);
check_links(&m);
assert_eq!(m.len(), 1);
assert_eq!(m.pop_back(), Some(2));
check_links(&m);
}
// Non-empty to non-empty
let v = vec![1, 2, 3, 4, 5];
let u = vec![9, 8, 1, 2, 3, 4, 5];
let mut m = list_from(&v);
let mut n = list_from(&u);
m.append(&mut n);
check_links(&m);
let mut sum = v;
sum.extend_from_slice(&u);
assert_eq!(sum.len(), m.len());
for elt in sum {
assert_eq!(m.pop_front(), Some(elt))
}
assert_eq!(n.len(), 0);
// Make sure `n` still works properly, since `append`
// made direct changes to private members.
n.push_back(3);
assert_eq!(n.len(), 1);
assert_eq!(n.pop_front(), Some(3));
check_links(&n);
}
#[test]
fn test_clone_from() {
// Short cloned from long
{
let v = vec![1, 2, 3, 4, 5];
let u = vec![8, 7, 6, 2, 3, 4, 5];
let mut m = list_from(&v);
let n = list_from(&u);
m.clone_from(&n);
check_links(&m);
assert_eq!(m, n);
for elt in u {
assert_eq!(m.pop_front(), Some(elt))
}
}
// Long cloned from short
{
let v = vec![1, 2, 3, 4, 5];
let u = vec![6, 7, 8];
let mut m = list_from(&v);
let n = list_from(&u);
m.clone_from(&n);
check_links(&m);
assert_eq!(m, n);
for elt in u {
assert_eq!(m.pop_front(), Some(elt))
}
}
// Two equal length lists
{
let v = vec![1, 2, 3, 4, 5];
let u = vec![9, 8, 1, 2, 3];
let mut m = list_from(&v);
let n = list_from(&u);
m.clone_from(&n);
check_links(&m);
assert_eq!(m, n);
for elt in u {
assert_eq!(m.pop_front(), Some(elt))
}
}
}
#[test]
#[cfg_attr(target_os = "emscripten", ignore)]
fn test_send() {
let n = list_from(&[1, 2, 3]);
thread::spawn(move || {
check_links(&n);
let a: &[_] = &[&1, &2, &3];
assert_eq!(a, &*n.iter().collect::<Vec<_>>());
})
.join()
.ok()
.unwrap();
}
#[test]
fn test_fuzz() {
for _ in 0..25 {
fuzz_test(3);
fuzz_test(16);
#[cfg(not(miri))] // Miri is too slow
fuzz_test(189);
}
}
#[test]
fn test_26021() {
// There was a bug in split_off that failed to null out the RHS's head's prev ptr.
// This caused the RHS's dtor to walk up into the LHS at drop and delete all of
// its nodes.
//
// https://github.com/rust-lang/rust/issues/26021
let mut v1 = LinkedList::new();
v1.push_front(1);
v1.push_front(1);
v1.push_front(1);
v1.push_front(1);
let _ = v1.split_off(3); // Dropping this unused tail must not free nodes still owned by v1
assert_eq!(v1.len(), 3);
assert_eq!(v1.iter().len(), 3);
assert_eq!(v1.iter().collect::<Vec<_>>().len(), 3);
}
#[test]
fn test_split_off() {
let mut v1 = LinkedList::new();
v1.push_front(1);
v1.push_front(1);
v1.push_front(1);
v1.push_front(1);
// test all splits
for ix in 0..1 + v1.len() {
let mut a = v1.clone();
let b = a.split_off(ix);
check_links(&a);
check_links(&b);
a.extend(b);
assert_eq!(v1, a);
}
}
fn fuzz_test(sz: i32) {
let mut m: LinkedList<_> = LinkedList::new();
let mut v = vec![];
for i in 0..sz {
check_links(&m);
let r: u8 = thread_rng().next_u32() as u8;
match r % 6 {
0 => {
m.pop_back();
v.pop();
}
1 => {
if !v.is_empty() {
m.pop_front();
v.remove(0);
}
}
2 | 4 => {
m.push_front(-i);
v.insert(0, -i);
}
3 | 5 | _ => {
m.push_back(i);
v.push(i);
}
}
}
check_links(&m);
let mut i = 0;
for (a, &b) in m.into_iter().zip(&v) {
i += 1;
assert_eq!(a, b);
}
assert_eq!(i, v.len());
}
#[test]
fn drain_filter_test() {
let mut m: LinkedList<u32> = LinkedList::new();
m.extend(&[1, 2, 3, 4, 5, 6]);
let deleted = m.drain_filter(|v| *v < 4).collect::<Vec<_>>();
check_links(&m);
assert_eq!(deleted, &[1, 2, 3]);
assert_eq!(m.into_iter().collect::<Vec<_>>(), &[4, 5, 6]);
}
#[test]
fn drain_to_empty_test() {
let mut m: LinkedList<u32> = LinkedList::new();
m.extend(&[1, 2, 3, 4, 5, 6]);
let deleted = m.drain_filter(|_| true).collect::<Vec<_>>();
check_links(&m);
assert_eq!(deleted, &[1, 2, 3, 4, 5, 6]);
assert_eq!(m.into_iter().collect::<Vec<_>>(), &[]);
}
#[test]
fn test_cursor_move_peek() {
let mut m: LinkedList<u32> = LinkedList::new();
m.extend(&[1, 2, 3, 4, 5, 6]);
let mut cursor = m.cursor_front();
assert_eq!(cursor.current(), Some(&1));
assert_eq!(cursor.peek_next(), Some(&2));
assert_eq!(cursor.peek_prev(), None);
assert_eq!(cursor.index(), Some(0));
cursor.move_prev();
assert_eq!(cursor.current(), None);
assert_eq!(cursor.peek_next(), Some(&1));
assert_eq!(cursor.peek_prev(), Some(&6));
assert_eq!(cursor.index(), None);
cursor.move_next();
cursor.move_next();
assert_eq!(cursor.current(), Some(&2));
assert_eq!(cursor.peek_next(), Some(&3));
assert_eq!(cursor.peek_prev(), Some(&1));
assert_eq!(cursor.index(), Some(1));
let mut cursor = m.cursor_back();
assert_eq!(cursor.current(), Some(&6));
assert_eq!(cursor.peek_next(), None);
assert_eq!(cursor.peek_prev(), Some(&5));
assert_eq!(cursor.index(), Some(5));
cursor.move_next();
assert_eq!(cursor.current(), None);
assert_eq!(cursor.peek_next(), Some(&1));
assert_eq!(cursor.peek_prev(), Some(&6));
assert_eq!(cursor.index(), None);
cursor.move_prev();
cursor.move_prev();
assert_eq!(cursor.current(), Some(&5));
assert_eq!(cursor.peek_next(), Some(&6));
assert_eq!(cursor.peek_prev(), Some(&4));
assert_eq!(cursor.index(), Some(4));
let mut m: LinkedList<u32> = LinkedList::new();
m.extend(&[1, 2, 3, 4, 5, 6]);
let mut cursor = m.cursor_front_mut();
assert_eq!(cursor.current(), Some(&mut 1));
assert_eq!(cursor.peek_next(), Some(&mut 2));
assert_eq!(cursor.peek_prev(), None);
assert_eq!(cursor.index(), Some(0));
cursor.move_prev();
assert_eq!(cursor.current(), None);
assert_eq!(cursor.peek_next(), Some(&mut 1));
assert_eq!(cursor.peek_prev(), Some(&mut 6));
assert_eq!(cursor.index(), None);
cursor.move_next();
cursor.move_next();
assert_eq!(cursor.current(), Some(&mut 2));
assert_eq!(cursor.peek_next(), Some(&mut 3));
assert_eq!(cursor.peek_prev(), Some(&mut 1));
assert_eq!(cursor.index(), Some(1));
let mut cursor2 = cursor.as_cursor();
assert_eq!(cursor2.current(), Some(&2));
assert_eq!(cursor2.index(), Some(1));
cursor2.move_next();
assert_eq!(cursor2.current(), Some(&3));
assert_eq!(cursor2.index(), Some(2));
assert_eq!(cursor.current(), Some(&mut 2));
assert_eq!(cursor.index(), Some(1));
let mut m: LinkedList<u32> = LinkedList::new();
m.extend(&[1, 2, 3, 4, 5, 6]);
let mut cursor = m.cursor_back_mut();
assert_eq!(cursor.current(), Some(&mut 6));
assert_eq!(cursor.peek_next(), None);
assert_eq!(cursor.peek_prev(), Some(&mut 5));
assert_eq!(cursor.index(), Some(5));
cursor.move_next();
assert_eq!(cursor.current(), None);
assert_eq!(cursor.peek_next(), Some(&mut 1));
assert_eq!(cursor.peek_prev(), Some(&mut 6));
assert_eq!(cursor.index(), None);
cursor.move_prev();
cursor.move_prev();
assert_eq!(cursor.current(), Some(&mut 5));
assert_eq!(cursor.peek_next(), Some(&mut 6));
assert_eq!(cursor.peek_prev(), Some(&mut 4));
assert_eq!(cursor.index(), Some(4));
let mut cursor2 = cursor.as_cursor();
assert_eq!(cursor2.current(), Some(&5));
assert_eq!(cursor2.index(), Some(4));
cursor2.move_prev();
assert_eq!(cursor2.current(), Some(&4));
assert_eq!(cursor2.index(), Some(3));
assert_eq!(cursor.current(), Some(&mut 5));
assert_eq!(cursor.index(), Some(4));
}
#[test]
fn test_cursor_mut_insert() {
let mut m: LinkedList<u32> = LinkedList::new();
m.extend(&[1, 2, 3, 4, 5, 6]);
let mut cursor = m.cursor_front_mut();
cursor.insert_before(7);
cursor.insert_after(8);
check_links(&m);
assert_eq!(m.iter().cloned().collect::<Vec<_>>(), &[7, 1, 8, 2, 3, 4, 5, 6]);
let mut cursor = m.cursor_front_mut();
cursor.move_prev();
cursor.insert_before(9);
cursor.insert_after(10);
check_links(&m);
assert_eq!(m.iter().cloned().collect::<Vec<_>>(), &[10, 7, 1, 8, 2, 3, 4, 5, 6, 9]);
let mut cursor = m.cursor_front_mut();
cursor.move_prev();
assert_eq!(cursor.remove_current(), None);
cursor.move_next();
cursor.move_next();
assert_eq!(cursor.remove_current(), Some(7));
cursor.move_prev();
cursor.move_prev();
cursor.move_prev();
assert_eq!(cursor.remove_current(), Some(9));
cursor.move_next();
assert_eq!(cursor.remove_current(), Some(10));
check_links(&m);
assert_eq!(m.iter().cloned().collect::<Vec<_>>(), &[1, 8, 2, 3, 4, 5, 6]);
let mut cursor = m.cursor_front_mut();
let mut p: LinkedList<u32> = LinkedList::new();
p.extend(&[100, 101, 102, 103]);
let mut q: LinkedList<u32> = LinkedList::new();
q.extend(&[200, 201, 202, 203]);
cursor.splice_after(p);
cursor.splice_before(q);
check_links(&m);
assert_eq!(
m.iter().cloned().collect::<Vec<_>>(),
&[200, 201, 202, 203, 1, 100, 101, 102, 103, 8, 2, 3, 4, 5, 6]
);
let mut cursor = m.cursor_front_mut();
cursor.move_prev();
let tmp = cursor.split_before();
assert_eq!(m.into_iter().collect::<Vec<_>>(), &[]);
m = tmp;
let mut cursor = m.cursor_front_mut();
cursor.move_next();
cursor.move_next();
cursor.move_next();
cursor.move_next();
cursor.move_next();
cursor.move_next();
let tmp = cursor.split_after();
assert_eq!(tmp.into_iter().collect::<Vec<_>>(), &[102, 103, 8, 2, 3, 4, 5, 6]);
check_links(&m);
assert_eq!(m.iter().cloned().collect::<Vec<_>>(), &[200, 201, 202, 203, 1, 100, 101]);
}


@ -1,7 +1,7 @@
//! A double-ended queue implemented with a growable ring buffer.
//!
//! This queue has `O(1)` amortized inserts and removals from both ends of the
//! container. It also has `O(1)` indexing like a vector. The contained elements
//! This queue has *O*(1) amortized inserts and removals from both ends of the
//! container. It also has *O*(1) indexing like a vector. The contained elements
//! are not required to be copyable, and the queue will be sendable if the
//! contained type is sendable.
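A quick illustration of the costs this module doc describes, using only stable `VecDeque` methods:

```rust
use std::collections::VecDeque;

fn main() {
    let mut q = VecDeque::new();
    q.push_back(2); // amortized O(1)
    q.push_front(1); // amortized O(1)
    assert_eq!(q[0], 1); // O(1) indexing, as with a vector
    assert_eq!(q.pop_back(), Some(2));
}
```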
@ -9,7 +9,6 @@
// ignore-tidy-filelength
use core::array::LengthAtMost32;
use core::cmp::{self, Ordering};
use core::fmt;
use core::hash::{Hash, Hasher};
@ -1512,7 +1511,7 @@ impl<T> VecDeque<T> {
/// Removes an element from anywhere in the `VecDeque` and returns it,
/// replacing it with the first element.
///
/// This does not preserve ordering, but is `O(1)`.
/// This does not preserve ordering, but is *O*(1).
///
/// Returns `None` if `index` is out of bounds.
///
@ -1547,7 +1546,7 @@ impl<T> VecDeque<T> {
/// Removes an element from anywhere in the `VecDeque` and returns it, replacing it with the
/// last element.
///
/// This does not preserve ordering, but is `O(1)`.
/// This does not preserve ordering, but is *O*(1).
///
/// Returns `None` if `index` is out of bounds.
///
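Going by the surrounding doc text, these two hunks belong to the deque's swap-remove methods; a small sketch of the order-breaking trade-off via the stable `swap_remove_back`:

```rust
use std::collections::VecDeque;

fn main() {
    let mut q: VecDeque<_> = vec![1, 2, 3, 4].into_iter().collect();
    // Index 1 is filled from the back and the old value returned: O(1),
    // but the relative order of the remaining elements changes.
    assert_eq!(q.swap_remove_back(1), Some(2));
    assert_eq!(q, [1, 4, 3]);
}
```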
@ -2331,7 +2330,7 @@ impl<T> VecDeque<T> {
///
/// # Complexity
///
/// Takes `O(min(mid, len() - mid))` time and no extra space.
/// Takes *O*(min(mid, len() - mid)) time and no extra space.
///
/// # Examples
///
@ -2374,7 +2373,7 @@ impl<T> VecDeque<T> {
///
/// # Complexity
///
/// Takes `O(min(k, len() - k))` time and no extra space.
/// Takes *O*(min(k, len() - k)) time and no extra space.
///
/// # Examples
///
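Judging by the `min(mid, len() - mid)` and `min(k, len() - k)` bounds, these hunks touch the deque's rotate operations; a sketch of the observable behavior via the stable `rotate_left`:

```rust
use std::collections::VecDeque;

fn main() {
    let mut q: VecDeque<_> = (0..10).collect();
    // The first 3 elements move to the back; the cost is proportional to
    // min(3, 10 - 3) element moves, with no extra allocation.
    q.rotate_left(3);
    assert_eq!(q, [3, 4, 5, 6, 7, 8, 9, 0, 1, 2]);
}
```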
@ -2889,9 +2888,9 @@ macro_rules! __impl_slice_eq1 {
__impl_slice_eq1! { [] VecDeque<A>, Vec<B>, }
__impl_slice_eq1! { [] VecDeque<A>, &[B], }
__impl_slice_eq1! { [] VecDeque<A>, &mut [B], }
__impl_slice_eq1! { [const N: usize] VecDeque<A>, [B; N], [B; N]: LengthAtMost32 }
__impl_slice_eq1! { [const N: usize] VecDeque<A>, &[B; N], [B; N]: LengthAtMost32 }
__impl_slice_eq1! { [const N: usize] VecDeque<A>, &mut [B; N], [B; N]: LengthAtMost32 }
__impl_slice_eq1! { [const N: usize] VecDeque<A>, [B; N], }
__impl_slice_eq1! { [const N: usize] VecDeque<A>, &[B; N], }
__impl_slice_eq1! { [const N: usize] VecDeque<A>, &mut [B; N], }
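The rewritten impls swap the `LengthAtMost32` bound for a plain `const N: usize` parameter, so array comparisons are no longer capped at 32 elements. A minimal check, assuming a toolchain where these const-generic impls have landed:

```rust
use std::collections::VecDeque;

fn main() {
    // 40 > 32: this only compiles once the impls are generic over any N.
    let q: VecDeque<u8> = std::iter::repeat(7).take(40).collect();
    assert_eq!(q, [7u8; 40]);
}
```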
#[stable(feature = "rust1", since = "1.0.0")]
impl<A: PartialOrd> PartialOrd for VecDeque<A> {
@ -3076,7 +3075,7 @@ impl<T> From<VecDeque<T>> for Vec<T> {
/// [`Vec<T>`]: crate::vec::Vec
/// [`VecDeque<T>`]: crate::collections::VecDeque
///
/// This never needs to re-allocate, but does need to do `O(n)` data movement if
/// This never needs to re-allocate, but does need to do *O*(*n*) data movement if
/// the circular buffer doesn't happen to be at the beginning of the allocation.
///
/// # Examples
@ -3084,7 +3083,7 @@ impl<T> From<VecDeque<T>> for Vec<T> {
/// ```
/// use std::collections::VecDeque;
///
/// // This one is O(1).
/// // This one is *O*(1).
/// let deque: VecDeque<_> = (1..5).collect();
/// let ptr = deque.as_slices().0.as_ptr();
/// let vec = Vec::from(deque);


@ -0,0 +1,572 @@
use super::*;
#[bench]
#[cfg_attr(miri, ignore)] // isolated Miri does not support benchmarks
fn bench_push_back_100(b: &mut test::Bencher) {
let mut deq = VecDeque::with_capacity(101);
b.iter(|| {
for i in 0..100 {
deq.push_back(i);
}
deq.head = 0;
deq.tail = 0;
})
}
#[bench]
#[cfg_attr(miri, ignore)] // isolated Miri does not support benchmarks
fn bench_push_front_100(b: &mut test::Bencher) {
let mut deq = VecDeque::with_capacity(101);
b.iter(|| {
for i in 0..100 {
deq.push_front(i);
}
deq.head = 0;
deq.tail = 0;
})
}
#[bench]
#[cfg_attr(miri, ignore)] // isolated Miri does not support benchmarks
fn bench_pop_back_100(b: &mut test::Bencher) {
let mut deq = VecDeque::<i32>::with_capacity(101);
b.iter(|| {
deq.head = 100;
deq.tail = 0;
while !deq.is_empty() {
test::black_box(deq.pop_back());
}
})
}
#[bench]
#[cfg_attr(miri, ignore)] // isolated Miri does not support benchmarks
fn bench_pop_front_100(b: &mut test::Bencher) {
let mut deq = VecDeque::<i32>::with_capacity(101);
b.iter(|| {
deq.head = 100;
deq.tail = 0;
while !deq.is_empty() {
test::black_box(deq.pop_front());
}
})
}
#[test]
fn test_swap_front_back_remove() {
fn test(back: bool) {
// This test exercises every combination of tail position and length.
// Capacity 15 should be large enough to cover every case.
let mut tester = VecDeque::with_capacity(15);
let usable_cap = tester.capacity();
let final_len = usable_cap / 2;
for len in 0..final_len {
let expected: VecDeque<_> =
if back { (0..len).collect() } else { (0..len).rev().collect() };
for tail_pos in 0..usable_cap {
tester.tail = tail_pos;
tester.head = tail_pos;
if back {
for i in 0..len * 2 {
tester.push_front(i);
}
for i in 0..len {
assert_eq!(tester.swap_remove_back(i), Some(len * 2 - 1 - i));
}
} else {
for i in 0..len * 2 {
tester.push_back(i);
}
for i in 0..len {
let idx = tester.len() - 1 - i;
assert_eq!(tester.swap_remove_front(idx), Some(len * 2 - 1 - i));
}
}
assert!(tester.tail < tester.cap());
assert!(tester.head < tester.cap());
assert_eq!(tester, expected);
}
}
}
test(true);
test(false);
}
#[test]
fn test_insert() {
// This test exercises every combination of tail position, length, and
// insertion position. Capacity 15 should be large enough to cover every case.
let mut tester = VecDeque::with_capacity(15);
// We can't guarantee a capacity of exactly 15, so use whatever we got.
// It will be 2^k - 1 for some k >= 4; anything else and this test
// isn't covering the cases it intends to.
let cap = tester.capacity();
// len is the length *after* insertion
let minlen = if cfg!(miri) { cap - 1 } else { 1 }; // Miri is too slow
for len in minlen..cap {
// 0, 1, 2, .., len - 1
let expected = (0..).take(len).collect::<VecDeque<_>>();
for tail_pos in 0..cap {
for to_insert in 0..len {
tester.tail = tail_pos;
tester.head = tail_pos;
for i in 0..len {
if i != to_insert {
tester.push_back(i);
}
}
tester.insert(to_insert, to_insert);
assert!(tester.tail < tester.cap());
assert!(tester.head < tester.cap());
assert_eq!(tester, expected);
}
}
}
}
#[test]
fn make_contiguous_big_tail() {
let mut tester = VecDeque::with_capacity(15);
for i in 0..3 {
tester.push_back(i);
}
for i in 3..10 {
tester.push_front(i);
}
// 012......9876543
assert_eq!(tester.capacity(), 15);
assert_eq!((&[9, 8, 7, 6, 5, 4, 3] as &[_], &[0, 1, 2] as &[_]), tester.as_slices());
let expected_start = tester.head;
tester.make_contiguous();
assert_eq!(tester.tail, expected_start);
assert_eq!((&[9, 8, 7, 6, 5, 4, 3, 0, 1, 2] as &[_], &[] as &[_]), tester.as_slices());
}
#[test]
fn make_contiguous_big_head() {
let mut tester = VecDeque::with_capacity(15);
for i in 0..8 {
tester.push_back(i);
}
for i in 8..10 {
tester.push_front(i);
}
// 01234567......98
let expected_start = 0;
tester.make_contiguous();
assert_eq!(tester.tail, expected_start);
assert_eq!((&[9, 8, 0, 1, 2, 3, 4, 5, 6, 7] as &[_], &[] as &[_]), tester.as_slices());
}
#[test]
fn make_contiguous_small_free() {
let mut tester = VecDeque::with_capacity(15);
for i in b'A'..b'I' {
tester.push_back(i as char);
}
for i in b'I'..b'N' {
tester.push_front(i as char);
}
// ABCDEFGH...MLKJI
let expected_start = 0;
tester.make_contiguous();
assert_eq!(tester.tail, expected_start);
assert_eq!(
(&['M', 'L', 'K', 'J', 'I', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'] as &[_], &[] as &[_]),
tester.as_slices()
);
tester.clear();
for i in b'I'..b'N' {
tester.push_back(i as char);
}
for i in b'A'..b'I' {
tester.push_front(i as char);
}
// IJKLM...HGFEDCBA
let expected_start = 0;
tester.make_contiguous();
assert_eq!(tester.tail, expected_start);
assert_eq!(
(&['H', 'G', 'F', 'E', 'D', 'C', 'B', 'A', 'I', 'J', 'K', 'L', 'M'] as &[_], &[] as &[_]),
tester.as_slices()
);
}
#[test]
fn test_remove() {
// This test exercises every combination of tail position, length, and
// removal position. Capacity 15 should be large enough to cover every case.
let mut tester = VecDeque::with_capacity(15);
// We can't guarantee a capacity of exactly 15, so use whatever we got.
// It will be 2^k - 1 for some k >= 4; anything else and this test
// isn't covering the cases it intends to.
let cap = tester.capacity();
// len is the length *after* removal
let minlen = if cfg!(miri) { cap - 2 } else { 0 }; // Miri is too slow
for len in minlen..cap - 1 {
// 0, 1, 2, .., len - 1
let expected = (0..).take(len).collect::<VecDeque<_>>();
for tail_pos in 0..cap {
for to_remove in 0..=len {
tester.tail = tail_pos;
tester.head = tail_pos;
for i in 0..len {
if i == to_remove {
tester.push_back(1234);
}
tester.push_back(i);
}
if to_remove == len {
tester.push_back(1234);
}
tester.remove(to_remove);
assert!(tester.tail < tester.cap());
assert!(tester.head < tester.cap());
assert_eq!(tester, expected);
}
}
}
}
#[test]
fn test_range() {
let mut tester: VecDeque<usize> = VecDeque::with_capacity(7);
let cap = tester.capacity();
let minlen = if cfg!(miri) { cap - 1 } else { 0 }; // Miri is too slow
for len in minlen..=cap {
for tail in 0..=cap {
for start in 0..=len {
for end in start..=len {
tester.tail = tail;
tester.head = tail;
for i in 0..len {
tester.push_back(i);
}
// Check that we iterate over the correct values
let range: VecDeque<_> = tester.range(start..end).copied().collect();
let expected: VecDeque<_> = (start..end).collect();
assert_eq!(range, expected);
}
}
}
}
}
#[test]
fn test_range_mut() {
let mut tester: VecDeque<usize> = VecDeque::with_capacity(7);
let cap = tester.capacity();
for len in 0..=cap {
for tail in 0..=cap {
for start in 0..=len {
for end in start..=len {
tester.tail = tail;
tester.head = tail;
for i in 0..len {
tester.push_back(i);
}
let head_was = tester.head;
let tail_was = tester.tail;
// Check that we iterate over the correct values
let range: VecDeque<_> = tester.range_mut(start..end).map(|v| *v).collect();
let expected: VecDeque<_> = (start..end).collect();
assert_eq!(range, expected);
// We shouldn't have changed the capacity or made the
// head or tail out of bounds
assert_eq!(tester.capacity(), cap);
assert_eq!(tester.tail, tail_was);
assert_eq!(tester.head, head_was);
}
}
}
}
}
#[test]
fn test_drain() {
let mut tester: VecDeque<usize> = VecDeque::with_capacity(7);
let cap = tester.capacity();
for len in 0..=cap {
for tail in 0..=cap {
for drain_start in 0..=len {
for drain_end in drain_start..=len {
tester.tail = tail;
tester.head = tail;
for i in 0..len {
tester.push_back(i);
}
// Check that we drain the correct values
let drained: VecDeque<_> = tester.drain(drain_start..drain_end).collect();
let drained_expected: VecDeque<_> = (drain_start..drain_end).collect();
assert_eq!(drained, drained_expected);
// We shouldn't have changed the capacity or made the
// head or tail out of bounds
assert_eq!(tester.capacity(), cap);
assert!(tester.tail < tester.cap());
assert!(tester.head < tester.cap());
// We should see the correct values in the VecDeque
let expected: VecDeque<_> = (0..drain_start).chain(drain_end..len).collect();
assert_eq!(expected, tester);
}
}
}
}
}
#[test]
fn test_shrink_to_fit() {
// This test exercises every combination of head and tail position.
// Capacity 15 should be large enough to cover every case.
let mut tester = VecDeque::with_capacity(15);
// We can't guarantee a capacity of exactly 15, so use whatever we got.
// It will be 2^k - 1 for some k >= 4; anything else and this test
// isn't covering the cases it intends to.
let cap = tester.capacity();
tester.reserve(63);
let max_cap = tester.capacity();
for len in 0..=cap {
// 0, 1, 2, .., len - 1
let expected = (0..).take(len).collect::<VecDeque<_>>();
for tail_pos in 0..=max_cap {
tester.tail = tail_pos;
tester.head = tail_pos;
tester.reserve(63);
for i in 0..len {
tester.push_back(i);
}
tester.shrink_to_fit();
assert!(tester.capacity() <= cap);
assert!(tester.tail < tester.cap());
assert!(tester.head < tester.cap());
assert_eq!(tester, expected);
}
}
}
#[test]
fn test_split_off() {
// This test exercises every combination of tail position, length, and
// split position. Capacity 15 should be large enough to cover every case.
let mut tester = VecDeque::with_capacity(15);
// We can't guarantee a capacity of exactly 15, so use whatever we got.
// It will be 2^k - 1 for some k >= 4; anything else and this test
// isn't covering the cases it intends to.
let cap = tester.capacity();
// len is the length *before* splitting
let minlen = if cfg!(miri) { cap - 1 } else { 0 }; // Miri is too slow
for len in minlen..cap {
// index to split at
for at in 0..=len {
// 0, 1, 2, .., at - 1 (may be empty)
let expected_self = (0..).take(at).collect::<VecDeque<_>>();
// at, at + 1, .., len - 1 (may be empty)
let expected_other = (at..).take(len - at).collect::<VecDeque<_>>();
for tail_pos in 0..cap {
tester.tail = tail_pos;
tester.head = tail_pos;
for i in 0..len {
tester.push_back(i);
}
let result = tester.split_off(at);
assert!(tester.tail < tester.cap());
assert!(tester.head < tester.cap());
assert!(result.tail < result.cap());
assert!(result.head < result.cap());
assert_eq!(tester, expected_self);
assert_eq!(result, expected_other);
}
}
}
}
#[test]
fn test_from_vec() {
use crate::vec::Vec;
for cap in 0..35 {
for len in 0..=cap {
let mut vec = Vec::with_capacity(cap);
vec.extend(0..len);
let vd = VecDeque::from(vec.clone());
assert!(vd.cap().is_power_of_two());
assert_eq!(vd.len(), vec.len());
assert!(vd.into_iter().eq(vec));
}
}
}
#[test]
fn test_vec_from_vecdeque() {
use crate::vec::Vec;
fn create_vec_and_test_convert(capacity: usize, offset: usize, len: usize) {
let mut vd = VecDeque::with_capacity(capacity);
for _ in 0..offset {
vd.push_back(0);
vd.pop_front();
}
vd.extend(0..len);
let vec: Vec<_> = Vec::from(vd.clone());
assert_eq!(vec.len(), vd.len());
assert!(vec.into_iter().eq(vd));
}
// Miri is too slow
let max_pwr = if cfg!(miri) { 5 } else { 7 };
for cap_pwr in 0..max_pwr {
// Make capacity as a (2^x)-1, so that the ring size is 2^x
let cap = (2i32.pow(cap_pwr) - 1) as usize;
// In these cases there is enough free space to solve it with copies
for len in 0..((cap + 1) / 2) {
// Test contiguous cases
for offset in 0..(cap - len) {
create_vec_and_test_convert(cap, offset, len)
}
// Test cases where block at end of buffer is bigger than block at start
for offset in (cap - len)..(cap - (len / 2)) {
create_vec_and_test_convert(cap, offset, len)
}
// Test cases where block at start of buffer is bigger than block at end
for offset in (cap - (len / 2))..cap {
create_vec_and_test_convert(cap, offset, len)
}
}
// Now there isn't (necessarily) enough free space to straighten the ring
// with simple copies; the ring will use swapping when:
// (cap + 1 - offset) > (cap + 1 - len) && (len - (cap + 1 - offset)) > (cap + 1 - len)
// i.e. right block size > free space && left block size > free space
for len in ((cap + 1) / 2)..cap {
// Test contiguous cases
for offset in 0..(cap - len) {
create_vec_and_test_convert(cap, offset, len)
}
// Test cases where block at end of buffer is bigger than block at start
for offset in (cap - len)..(cap - (len / 2)) {
create_vec_and_test_convert(cap, offset, len)
}
// Test cases where block at start of buffer is bigger than block at end
for offset in (cap - (len / 2))..cap {
create_vec_and_test_convert(cap, offset, len)
}
}
}
}
#[test]
fn test_clone_from() {
let m = vec![1; 8];
let n = vec![2; 12];
let limit = if cfg!(miri) { 4 } else { 8 }; // Miri is too slow
for pfv in 0..limit {
for pfu in 0..limit {
for longer in 0..2 {
let (vr, ur) = if longer == 0 { (&m, &n) } else { (&n, &m) };
let mut v = VecDeque::from(vr.clone());
for _ in 0..pfv {
v.push_front(1);
}
let mut u = VecDeque::from(ur.clone());
for _ in 0..pfu {
u.push_front(2);
}
v.clone_from(&u);
assert_eq!(&v, &u);
}
}
}
}
#[test]
fn test_vec_deque_truncate_drop() {
static mut DROPS: u32 = 0;
#[derive(Clone)]
struct Elem(i32);
impl Drop for Elem {
fn drop(&mut self) {
unsafe {
DROPS += 1;
}
}
}
let v = vec![Elem(1), Elem(2), Elem(3), Elem(4), Elem(5)];
for push_front in 0..=v.len() {
let v = v.clone();
let mut tester = VecDeque::with_capacity(5);
for (index, elem) in v.into_iter().enumerate() {
if index < push_front {
tester.push_front(elem);
} else {
tester.push_back(elem);
}
}
assert_eq!(unsafe { DROPS }, 0);
tester.truncate(3);
assert_eq!(unsafe { DROPS }, 2);
tester.truncate(0);
assert_eq!(unsafe { DROPS }, 5);
unsafe {
DROPS = 0;
}
}
}
#[test]
fn issue_53529() {
use crate::boxed::Box;
let mut dst = VecDeque::new();
dst.push_front(Box::new(1));
dst.push_front(Box::new(2));
assert_eq!(*dst.pop_back().unwrap(), 1);
let mut src = VecDeque::new();
src.push_front(Box::new(2));
dst.append(&mut src);
for a in dst {
assert_eq!(*a, 2);
}
}

library/alloc/src/fmt.rs

@ -0,0 +1,579 @@
//! Utilities for formatting and printing `String`s.
//!
//! This module contains the runtime support for the [`format!`] syntax extension.
//! This macro is implemented in the compiler to emit calls to this module in
//! order to format arguments at runtime into strings.
//!
//! # Usage
//!
//! The [`format!`] macro is intended to be familiar to those coming from C's
//! `printf`/`fprintf` functions or Python's `str.format` function.
//!
//! Some examples of the [`format!`] extension are:
//!
//! ```
//! format!("Hello"); // => "Hello"
//! format!("Hello, {}!", "world"); // => "Hello, world!"
//! format!("The number is {}", 1); // => "The number is 1"
//! format!("{:?}", (3, 4)); // => "(3, 4)"
//! format!("{value}", value=4); // => "4"
//! format!("{} {}", 1, 2); // => "1 2"
//! format!("{:04}", 42); // => "0042" with leading zeros
//! ```
//!
//! From these, you can see that the first argument is a format string. It is
//! required by the compiler for this to be a string literal; it cannot be a
//! variable passed in (in order to perform validity checking). The compiler
//! will then parse the format string and determine if the list of arguments
//! provided is suitable to pass to this format string.
//!
//! To convert a single value to a string, use the [`to_string`] method. This
//! will use the [`Display`] formatting trait.
//!
//! ## Positional parameters
//!
//! Each formatting argument is allowed to specify which value argument it's
//! referencing, and if omitted it is assumed to be "the next argument". For
//! example, the format string `{} {} {}` would take three parameters, and they
//! would be formatted in the same order as they're given. The format string
//! `{2} {1} {0}`, however, would format arguments in reverse order.
//!
//! Things can get a little tricky once you start intermingling the two types of
//! positional specifiers. The "next argument" specifier can be thought of as an
//! iterator over the argument. Each time a "next argument" specifier is seen,
//! the iterator advances. This leads to behavior like this:
//!
//! ```
//! format!("{1} {} {0} {}", 1, 2); // => "2 1 1 2"
//! ```
//!
//! The internal iterator over the argument has not been advanced by the time
//! the first `{}` is seen, so it prints the first argument. Then upon reaching
//! the second `{}`, the iterator has advanced forward to the second argument.
//! Essentially, parameters that explicitly name their argument do not affect
//! parameters that do not name an argument in terms of positional specifiers.
//!
//! A format string is required to use all of its arguments, otherwise it is a
//! compile-time error. You may refer to the same argument more than once in the
//! format string.
//!
//! ## Named parameters
//!
//! Rust itself does not have a Python-like equivalent of named parameters to a
//! function, but the [`format!`] macro is a syntax extension that allows it to
//! leverage named parameters. Named parameters are listed at the end of the
//! argument list and have the syntax:
//!
//! ```text
//! identifier '=' expression
//! ```
//!
//! For example, the following [`format!`] expressions all use named arguments:
//!
//! ```
//! format!("{argument}", argument = "test"); // => "test"
//! format!("{name} {}", 1, name = 2); // => "2 1"
//! format!("{a} {c} {b}", a="a", b='b', c=3); // => "a 3 b"
//! ```
//!
//! It is not valid to put positional parameters (those without names) after
//! arguments that have names. Like with positional parameters, it is not
//! valid to provide named parameters that are unused by the format string.
//!
//! # Formatting Parameters
//!
//! Each argument being formatted can be transformed by a number of formatting
//! parameters (corresponding to `format_spec` in [the syntax](#syntax)). These
//! parameters affect the string representation of what's being formatted.
//!
//! ## Width
//!
//! ```
//! // All of these print "Hello x    !"
//! println!("Hello {:5}!", "x");
//! println!("Hello {:1$}!", "x", 5);
//! println!("Hello {1:0$}!", 5, "x");
//! println!("Hello {:width$}!", "x", width = 5);
//! ```
//!
//! This is a parameter for the "minimum width" that the format should take up.
//! If the value's string does not fill up this many characters, then the
//! padding specified by fill/alignment will be used to take up the required
//! space (see below).
//!
//! The value for the width can also be provided as a [`usize`] in the list of
//! parameters by adding a postfix `$`, indicating that the second argument is
//! a [`usize`] specifying the width.
//!
//! Referring to an argument with the dollar syntax does not affect the "next
//! argument" counter, so it's usually a good idea to refer to arguments by
//! position, or use named arguments.
//!
//! ## Fill/Alignment
//!
//! ```
//! assert_eq!(format!("Hello {:<5}!", "x"), "Hello x !");
//! assert_eq!(format!("Hello {:-<5}!", "x"), "Hello x----!");
//! assert_eq!(format!("Hello {:^5}!", "x"), "Hello x !");
//! assert_eq!(format!("Hello {:>5}!", "x"), "Hello x!");
//! ```
//!
//! The optional fill character and alignment is provided normally in conjunction with the
//! [`width`](#width) parameter. It must be defined before `width`, right after the `:`.
//! This indicates that if the value being formatted is smaller than
//! `width` some extra characters will be printed around it.
//! Filling comes in the following variants for different alignments:
//!
//! * `[fill]<` - the argument is left-aligned in `width` columns
//! * `[fill]^` - the argument is center-aligned in `width` columns
//! * `[fill]>` - the argument is right-aligned in `width` columns
//!
//! The default [fill/alignment](#fillalignment) for non-numerics is a space and
//! left-aligned. The
//! default for numeric formatters is also a space character but with right-alignment. If
//! the `0` flag (see below) is specified for numerics, then the implicit fill character is
//! `0`.
//!
//! Note that alignment may not be implemented by some types. In particular, it
//! is not generally implemented for the `Debug` trait. A good way to ensure
//! padding is applied is to format your input, then pad this resulting string
//! to obtain your output:
//!
//! ```
//! println!("Hello {:^15}!", format!("{:?}", Some("hi"))); // => "Hello Some("hi") !"
//! ```
//!
//! ## Sign/`#`/`0`
//!
//! ```
//! assert_eq!(format!("Hello {:+}!", 5), "Hello +5!");
//! assert_eq!(format!("{:#x}!", 27), "0x1b!");
//! assert_eq!(format!("Hello {:05}!", 5), "Hello 00005!");
//! assert_eq!(format!("Hello {:05}!", -5), "Hello -0005!");
//! assert_eq!(format!("{:#010x}!", 27), "0x0000001b!");
//! ```
//!
//! These are all flags altering the behavior of the formatter.
//!
//! * `+` - This is intended for numeric types and indicates that the sign
//! should always be printed. Positive signs are never printed by
//! default, and the negative sign is only printed by default for the
//! `Signed` trait. This flag indicates that the correct sign (`+` or `-`)
//! should always be printed.
//! * `-` - Currently not used
//! * `#` - This flag indicates that the "alternate" form of printing should
//! be used. The alternate forms are:
//! * `#?` - pretty-print the [`Debug`] formatting
//! * `#x` - precedes the argument with a `0x`
//! * `#X` - precedes the argument with a `0x`
//! * `#b` - precedes the argument with a `0b`
//! * `#o` - precedes the argument with a `0o`
//! * `0` - This is used to indicate for integer formats that the padding to `width` should
//! both be done with a `0` character as well as be sign-aware. A format
//! like `{:08}` would yield `00000001` for the integer `1`, while the
//! same format would yield `-0000001` for the integer `-1`. Notice that
//! the negative version has one fewer zero than the positive version.
//! Note that padding zeros are always placed after the sign (if any)
//! and before the digits. When used together with the `#` flag, a similar
//! rule applies: padding zeros are inserted after the prefix but before
//! the digits. The prefix is included in the total width.
//!
//! ## Precision
//!
//! For non-numeric types, this can be considered a "maximum width". If the resulting string is
//! longer than this width, then it is truncated down to this many characters and that truncated
//! value is emitted with proper `fill`, `alignment` and `width` if those parameters are set.
//!
//! For integral types, this is ignored.
//!
//! For floating-point types, this indicates how many digits after the decimal point should be
//! printed.
//!
//! There are three possible ways to specify the desired `precision`:
//!
//! 1. An integer `.N`:
//!
//! the integer `N` itself is the precision.
//!
//! 2. An integer or name followed by dollar sign `.N$`:
//!
//! use format *argument* `N` (which must be a `usize`) as the precision.
//!
//! 3. An asterisk `.*`:
//!
//! `.*` means that this `{...}` is associated with *two* format inputs rather than one: the
//! first input holds the `usize` precision, and the second holds the value to print. Note that
//! in this case, if one uses the format string `{<arg>:<spec>.*}`, then the `<arg>` part refers
//! to the *value* to print, and the `precision` must come in the input preceding `<arg>`.
//!
//! For example, the following calls all print the same thing `Hello x is 0.01000`:
//!
//! ```
//! // Hello {arg 0 ("x")} is {arg 1 (0.01) with precision specified inline (5)}
//! println!("Hello {0} is {1:.5}", "x", 0.01);
//!
//! // Hello {arg 1 ("x")} is {arg 2 (0.01) with precision specified in arg 0 (5)}
//! println!("Hello {1} is {2:.0$}", 5, "x", 0.01);
//!
//! // Hello {arg 0 ("x")} is {arg 2 (0.01) with precision specified in arg 1 (5)}
//! println!("Hello {0} is {2:.1$}", "x", 5, 0.01);
//!
//! // Hello {next arg ("x")} is {second of next two args (0.01) with precision
//! // specified in first of next two args (5)}
//! println!("Hello {} is {:.*}", "x", 5, 0.01);
//!
//! // Hello {next arg ("x")} is {arg 2 (0.01) with precision
//! // specified in its predecessor (5)}
//! println!("Hello {} is {2:.*}", "x", 5, 0.01);
//!
//! // Hello {next arg ("x")} is {arg "number" (0.01) with precision specified
//! // in arg "prec" (5)}
//! println!("Hello {} is {number:.prec$}", "x", prec = 5, number = 0.01);
//! ```
//!
//! While these:
//!
//! ```
//! println!("{}, `{name:.*}` has 3 fractional digits", "Hello", 3, name=1234.56);
//! println!("{}, `{name:.*}` has 3 characters", "Hello", 3, name="1234.56");
//! println!("{}, `{name:>8.*}` has 3 right-aligned characters", "Hello", 3, name="1234.56");
//! ```
//!
//! print three significantly different things:
//!
//! ```text
//! Hello, `1234.560` has 3 fractional digits
//! Hello, `123` has 3 characters
//! Hello, `     123` has 3 right-aligned characters
//! ```
//!
//! ## Localization
//!
//! In some programming languages, the behavior of string formatting functions
//! depends on the operating system's locale setting. The format functions
//! provided by Rust's standard library do not have any concept of locale and
//! will produce the same results on all systems regardless of user
//! configuration.
//!
//! For example, the following code will always print `1.5` even if the system
//! locale uses a decimal separator other than a dot.
//!
//! ```
//! println!("The value is {}", 1.5);
//! ```
//!
//! # Escaping
//!
//! The literal characters `{` and `}` may be included in a string by preceding
//! them with the same character. For example, the `{` character is escaped with
//! `{{` and the `}` character is escaped with `}}`.
//!
//! ```
//! assert_eq!(format!("Hello {{}}"), "Hello {}");
//! assert_eq!(format!("{{ Hello"), "{ Hello");
//! ```
//!
//! # Syntax
//!
//! To summarize, here you can find the full grammar of format strings.
//! The syntax for the formatting language used is drawn from other languages,
//! so it should not be too alien. Arguments are formatted with Python-like
//! syntax, meaning that arguments are surrounded by `{}` instead of the C-like
//! `%`. The actual grammar for the formatting syntax is:
//!
//! ```text
//! format_string := <text> [ maybe-format <text> ] *
//! maybe-format := '{' '{' | '}' '}' | <format>
//! format := '{' [ argument ] [ ':' format_spec ] '}'
//! argument := integer | identifier
//!
//! format_spec := [[fill]align][sign]['#']['0'][width]['.' precision][type]
//! fill := character
//! align := '<' | '^' | '>'
//! sign := '+' | '-'
//! width := count
//! precision := count | '*'
//! type := identifier | '?' | ''
//! count := parameter | integer
//! parameter := argument '$'
//! ```
//!
//! # Formatting traits
//!
//! When requesting that an argument be formatted with a particular type, you
//! are actually requesting that an argument ascribes to a particular trait.
//! This allows multiple actual types to be formatted via `{:x}` (like [`i8`] as
//! well as [`isize`]). The current mapping of types to traits is:
//!
//! * *nothing* ⇒ [`Display`]
//! * `?` ⇒ [`Debug`]
//! * `x?` ⇒ [`Debug`] with lower-case hexadecimal integers
//! * `X?` ⇒ [`Debug`] with upper-case hexadecimal integers
//! * `o` ⇒ [`Octal`](trait.Octal.html)
//! * `x` ⇒ [`LowerHex`](trait.LowerHex.html)
//! * `X` ⇒ [`UpperHex`](trait.UpperHex.html)
//! * `p` ⇒ [`Pointer`](trait.Pointer.html)
//! * `b` ⇒ [`Binary`]
//! * `e` ⇒ [`LowerExp`](trait.LowerExp.html)
//! * `E` ⇒ [`UpperExp`](trait.UpperExp.html)
//!
//! What this means is that any type of argument which implements the
//! [`fmt::Binary`][`Binary`] trait can then be formatted with `{:b}`. Implementations
//! are provided for these traits for a number of primitive types by the
//! standard library as well. If no format is specified (as in `{}` or `{:6}`),
//! then the format trait used is the [`Display`] trait.
//!
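//! As a quick sanity check, here are a few concrete picks from that mapping
//! (all long-stable formatting behavior):
//!
//! ```
//! assert_eq!(format!("{:x}", 255), "ff");       // LowerHex
//! assert_eq!(format!("{:b}", 5), "101");        // Binary
//! assert_eq!(format!("{:e}", 1500.0), "1.5e3"); // LowerExp
//! ```
//!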
//! When implementing a format trait for your own type, you will have to
//! implement a method of the signature:
//!
//! ```
//! # #![allow(dead_code)]
//! # use std::fmt;
//! # struct Foo; // our custom type
//! # impl fmt::Display for Foo {
//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
//! # write!(f, "testing, testing")
//! # } }
//! ```
//!
//! Your type will be passed as `self` by-reference, and then the function
//! should emit output into the `f.buf` stream. It is up to each format trait
//! implementation to correctly adhere to the requested formatting parameters.
//! The values of these parameters will be listed in the fields of the
//! [`Formatter`] struct. In order to help with this, the [`Formatter`] struct also
//! provides some helper methods.
//!
//! Additionally, the return value of this function is [`fmt::Result`] which is a
//! type alias of [`Result`]`<(), `[`std::fmt::Error`]`>`. Formatting implementations
//! should ensure that they propagate errors from the [`Formatter`] (e.g., when
//! calling [`write!`]). However, they should never return errors spuriously. That
//! is, a formatting implementation must and may only return an error if the
//! passed-in [`Formatter`] returns an error. This is because, contrary to what
//! the function signature might suggest, string formatting is an infallible
//! operation. This function only returns a result because writing to the
//! underlying stream might fail and it must provide a way to propagate the fact
//! that an error has occurred back up the stack.
//!
//! An example of implementing the formatting traits would look
//! like:
//!
//! ```
//! use std::fmt;
//!
//! #[derive(Debug)]
//! struct Vector2D {
//! x: isize,
//! y: isize,
//! }
//!
//! impl fmt::Display for Vector2D {
//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
//! // The `f` value implements the `Write` trait, which is what the
//! // write! macro is expecting. Note that this formatting ignores the
//! // various flags provided to format strings.
//! write!(f, "({}, {})", self.x, self.y)
//! }
//! }
//!
//! // Different traits allow different forms of output of a type. The meaning
//! // of this format is to print the magnitude of a vector.
//! impl fmt::Binary for Vector2D {
//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
//! let magnitude = (self.x * self.x + self.y * self.y) as f64;
//! let magnitude = magnitude.sqrt();
//!
//! // Respect the formatting flags by using the helper method
//! // `pad_integral` on the Formatter object. See the method
//! // documentation for details, and the function `pad` can be used
//! // to pad strings.
//! let decimals = f.precision().unwrap_or(3);
//! let string = format!("{:.*}", decimals, magnitude);
//! f.pad_integral(true, "", &string)
//! }
//! }
//!
//! fn main() {
//! let myvector = Vector2D { x: 3, y: 4 };
//!
//! println!("{}", myvector); // => "(3, 4)"
//! println!("{:?}", myvector); // => "Vector2D {x: 3, y:4}"
//! println!("{:10.3b}", myvector); // => " 5.000"
//! }
//! ```
//!
//! ### `fmt::Display` vs `fmt::Debug`
//!
//! These two formatting traits have distinct purposes:
//!
//! - [`fmt::Display`][`Display`] implementations assert that the type can be faithfully
//! represented as a UTF-8 string at all times. It is **not** expected that
//! all types implement the [`Display`] trait.
//! - [`fmt::Debug`][`Debug`] implementations should be implemented for **all** public types.
//! Output will typically represent the internal state as faithfully as possible.
//! The purpose of the [`Debug`] trait is to facilitate debugging Rust code. In
//! most cases, using `#[derive(Debug)]` is sufficient and recommended.
//!
//! Some examples of the output from both traits:
//!
//! ```
//! assert_eq!(format!("{} {:?}", 3, 4), "3 4");
//! assert_eq!(format!("{} {:?}", 'a', 'b'), "a 'b'");
//! assert_eq!(format!("{} {:?}", "foo\n", "bar\n"), "foo\n \"bar\\n\"");
//! ```
//!
//! # Related macros
//!
//! There are a number of related macros in the [`format!`] family. The ones that
//! are currently implemented are:
//!
//! ```ignore (only-for-syntax-highlight)
//! format! // described above
//! write! // first argument is a &mut io::Write, the destination
//! writeln! // same as write but appends a newline
//! print! // the format string is printed to the standard output
//! println! // same as print but appends a newline
//! eprint! // the format string is printed to the standard error
//! eprintln! // same as eprint but appends a newline
//! format_args! // described below.
//! ```
//!
//! ### `write!`
//!
//! This and [`writeln!`] are two macros which are used to emit the format string
//! to a specified stream. This is used to prevent intermediate allocations of
//! format strings and instead directly write the output. Under the hood, this
//! function is actually invoking the [`write_fmt`] function defined on the
//! [`std::io::Write`] trait. Example usage is:
//!
//! ```
//! # #![allow(unused_must_use)]
//! use std::io::Write;
//! let mut w = Vec::new();
//! write!(&mut w, "Hello {}!", "world");
//! ```
//!
//! ### `print!`
//!
//! This and [`println!`] emit their output to stdout. Similarly to the [`write!`]
//! macro, the goal of these macros is to avoid intermediate allocations when
//! printing output. Example usage is:
//!
//! ```
//! print!("Hello {}!", "world");
//! println!("I have a newline {}", "character at the end");
//! ```
//! ### `eprint!`
//!
//! The [`eprint!`] and [`eprintln!`] macros are identical to
//! [`print!`] and [`println!`], respectively, except they emit their
//! output to stderr.
//!
//! ### `format_args!`
//!
//! This is a curious macro used to safely pass around
//! an opaque object describing the format string. This object
//! does not require any heap allocations to create, and it only
//! references information on the stack. Under the hood, all of
//! the related macros are implemented in terms of this. First
//! off, some example usage is:
//!
//! ```
//! # #![allow(unused_must_use)]
//! use std::fmt;
//! use std::io::{self, Write};
//!
//! let mut some_writer = io::stdout();
//! write!(&mut some_writer, "{}", format_args!("print with a {}", "macro"));
//!
//! fn my_fmt_fn(args: fmt::Arguments) {
//! write!(&mut io::stdout(), "{}", args);
//! }
//! my_fmt_fn(format_args!(", or a {} too", "function"));
//! ```
//!
//! The result of the [`format_args!`] macro is a value of type [`fmt::Arguments`].
//! This structure can then be passed to the [`write`] and [`format`] functions
//! inside this module in order to process the format string.
//! The goal of this macro is to even further prevent intermediate allocations
//! when dealing with formatting strings.
//!
//! For example, a logging library could use the standard formatting syntax, but
//! it would internally pass around this structure until it has been determined
//! where output should go to.
//!
//! [`fmt::Result`]: Result
//! [`Result`]: core::result::Result
//! [`std::fmt::Error`]: Error
//! [`write!`]: core::write
//! [`write`]: core::write
//! [`format!`]: crate::format
//! [`to_string`]: crate::string::ToString
//! [`writeln!`]: core::writeln
//! [`write_fmt`]: ../../std/io/trait.Write.html#method.write_fmt
//! [`std::io::Write`]: ../../std/io/trait.Write.html
//! [`print!`]: ../../std/macro.print.html
//! [`println!`]: ../../std/macro.println.html
//! [`eprint!`]: ../../std/macro.eprint.html
//! [`eprintln!`]: ../../std/macro.eprintln.html
//! [`format_args!`]: core::format_args
//! [`fmt::Arguments`]: Arguments
//! [`format`]: crate::format
#![stable(feature = "rust1", since = "1.0.0")]
#[unstable(feature = "fmt_internals", issue = "none")]
pub use core::fmt::rt;
#[stable(feature = "fmt_flags_align", since = "1.28.0")]
pub use core::fmt::Alignment;
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::Error;
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{write, ArgumentV1, Arguments};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{Binary, Octal};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{Debug, Display};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{DebugList, DebugMap, DebugSet, DebugStruct, DebugTuple};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{Formatter, Result, Write};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{LowerExp, UpperExp};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{LowerHex, Pointer, UpperHex};
use crate::string;
/// The `format` function takes an [`Arguments`] struct and returns the resulting
/// formatted string.
///
/// The [`Arguments`] instance can be created with the [`format_args!`] macro.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use std::fmt;
///
/// let s = fmt::format(format_args!("Hello, {}!", "world"));
/// assert_eq!(s, "Hello, world!");
/// ```
///
/// Please note that using [`format!`] might be preferable.
/// Example:
///
/// ```
/// let s = format!("Hello, {}!", "world");
/// assert_eq!(s, "Hello, world!");
/// ```
///
/// [`format_args!`]: core::format_args
/// [`format!`]: crate::format
#[stable(feature = "rust1", since = "1.0.0")]
pub fn format(args: Arguments<'_>) -> string::String {
let capacity = args.estimated_capacity();
let mut output = string::String::with_capacity(capacity);
output.write_fmt(args).expect("a formatting trait implementation returned an error");
output
}

library/alloc/src/lib.rs

@ -0,0 +1,191 @@
//! # The Rust core allocation and collections library
//!
//! This library provides smart pointers and collections for managing
//! heap-allocated values.
//!
//! This library, like libcore, normally doesn't need to be used directly
//! since its contents are re-exported in the [`std` crate](../std/index.html).
//! Crates that use the `#![no_std]` attribute however will typically
//! not depend on `std`, so they'd use this crate instead.
//!
//! ## Boxed values
//!
//! The [`Box`] type is a smart pointer type. There can only be one owner of a
//! [`Box`], and the owner can decide to mutate the contents, which live on the
//! heap.
//!
//! This type can be sent among threads efficiently as the size of a `Box` value
//! is the same as that of a pointer. Tree-like data structures are often built
//! with boxes because each node often has only one owner, the parent.
//!
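//! For instance, a minimal sketch of that tree-building pattern (the `Node`
//! type here is purely illustrative, not part of this crate):
//!
//! ```
//! struct Node {
//!     value: i32,
//!     children: Vec<Box<Node>>,
//! }
//!
//! let leaf = Box::new(Node { value: 2, children: Vec::new() });
//! let root = Node { value: 1, children: vec![leaf] };
//! assert_eq!(root.children[0].value, 2);
//! ```
//!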
//! ## Reference counted pointers
//!
//! The [`Rc`] type is a non-threadsafe reference-counted pointer type intended
//! for sharing memory within a thread. An [`Rc`] pointer wraps a type, `T`, and
//! only allows access to `&T`, a shared reference.
//!
//! This type is useful when inherited mutability (such as using [`Box`]) is too
//! constraining for an application, and is often paired with the [`Cell`] or
//! [`RefCell`] types in order to allow mutation.
//!
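//! A hedged sketch of that `Rc` + `RefCell` pairing (paths written via `std`
//! for brevity):
//!
//! ```
//! use std::cell::RefCell;
//! use std::rc::Rc;
//!
//! let shared = Rc::new(RefCell::new(vec![1, 2]));
//! let alias = Rc::clone(&shared);
//! alias.borrow_mut().push(3); // mutate through one handle...
//! assert_eq!(shared.borrow().len(), 3); // ...observe through the other
//! ```
//!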
//! ## Atomically reference counted pointers
//!
//! The [`Arc`] type is the threadsafe equivalent of the [`Rc`] type. It
//! provides all the same functionality of [`Rc`], except it requires that the
//! contained type `T` is shareable. Additionally, [`Arc<T>`][`Arc`] is itself
//! sendable while [`Rc<T>`][`Rc`] is not.
//!
//! This type allows for shared access to the contained data, and is often
//! paired with synchronization primitives such as mutexes to allow mutation of
//! shared resources.
//!
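//! For example (nothing beyond stable `std` here):
//!
//! ```
//! use std::sync::{Arc, Mutex};
//! use std::thread;
//!
//! let counter = Arc::new(Mutex::new(0));
//! let handle = Arc::clone(&counter);
//! thread::spawn(move || *handle.lock().unwrap() += 1).join().unwrap();
//! assert_eq!(*counter.lock().unwrap(), 1);
//! ```
//!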
//! ## Collections
//!
//! Implementations of the most common general purpose data structures are
//! defined in this library. They are re-exported through the
//! [standard collections library](../std/collections/index.html).
//!
//! ## Heap interfaces
//!
//! The [`alloc`](alloc/index.html) module defines the low-level interface to the
//! default global allocator. It is not compatible with the libc allocator API.
//!
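//! A hedged sketch of driving that interface directly, shown via the `std`
//! re-export (unsafe because the caller upholds the layout contract):
//!
//! ```
//! use std::alloc::{alloc, dealloc, Layout};
//!
//! unsafe {
//!     let layout = Layout::new::<u32>();
//!     let p = alloc(layout) as *mut u32;
//!     if !p.is_null() {
//!         p.write(42);
//!         assert_eq!(p.read(), 42);
//!         dealloc(p as *mut u8, layout);
//!     }
//! }
//! ```
//!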
//! [`Arc`]: sync
//! [`Box`]: boxed
//! [`Cell`]: core::cell
//! [`Rc`]: rc
//! [`RefCell`]: core::cell
#![allow(unused_attributes)]
#![stable(feature = "alloc", since = "1.36.0")]
#![doc(
html_root_url = "https://doc.rust-lang.org/nightly/",
html_playground_url = "https://play.rust-lang.org/",
issue_tracker_base_url = "https://github.com/rust-lang/rust/issues/",
test(no_crate_inject, attr(allow(unused_variables), deny(warnings)))
)]
#![no_std]
#![needs_allocator]
#![warn(deprecated_in_future)]
#![warn(missing_docs)]
#![warn(missing_debug_implementations)]
#![allow(explicit_outlives_requirements)]
#![allow(incomplete_features)]
#![deny(unsafe_op_in_unsafe_fn)]
#![cfg_attr(not(test), feature(generator_trait))]
#![cfg_attr(test, feature(test))]
#![feature(allocator_api)]
#![feature(array_chunks)]
#![feature(allow_internal_unstable)]
#![feature(arbitrary_self_types)]
#![feature(box_patterns)]
#![feature(box_syntax)]
#![feature(btree_drain_filter)]
#![feature(cfg_sanitize)]
#![feature(cfg_target_has_atomic)]
#![feature(coerce_unsized)]
#![feature(const_btree_new)]
#![feature(const_generics)]
#![feature(const_in_array_repeat_expressions)]
#![feature(cow_is_borrowed)]
#![feature(deque_range)]
#![feature(dispatch_from_dyn)]
#![feature(core_intrinsics)]
#![feature(container_error_extra)]
#![feature(dropck_eyepatch)]
#![feature(exact_size_is_empty)]
#![feature(exclusive_range_pattern)]
#![feature(extend_one)]
#![feature(fmt_internals)]
#![feature(fn_traits)]
#![feature(fundamental)]
#![feature(internal_uninit_const)]
#![feature(lang_items)]
#![feature(layout_for_ptr)]
#![feature(libc)]
#![feature(map_first_last)]
#![feature(map_into_keys_values)]
#![feature(negative_impls)]
#![feature(new_uninit)]
#![feature(nll)]
#![feature(nonnull_slice_from_raw_parts)]
#![feature(optin_builtin_traits)]
#![feature(or_patterns)]
#![feature(pattern)]
#![feature(ptr_internals)]
#![feature(raw_ref_op)]
#![feature(rustc_attrs)]
#![feature(receiver_trait)]
#![feature(min_specialization)]
#![feature(slice_ptr_get)]
#![feature(slice_ptr_len)]
#![feature(staged_api)]
#![feature(std_internals)]
#![feature(str_internals)]
#![feature(trusted_len)]
#![feature(try_reserve)]
#![feature(unboxed_closures)]
#![feature(unicode_internals)]
#![feature(unsafe_block_in_unsafe_fn)]
#![feature(unsize)]
#![feature(unsized_locals)]
#![feature(allocator_internals)]
#![feature(slice_partition_dedup)]
#![feature(maybe_uninit_extra, maybe_uninit_slice)]
#![feature(alloc_layout_extra)]
#![feature(try_trait)]
#![feature(associated_type_bounds)]
// Allow testing this library
#[cfg(test)]
#[macro_use]
extern crate std;
#[cfg(test)]
extern crate test;
// Module with internal macros used by other modules (needs to be included before other modules).
#[macro_use]
mod macros;
// Heaps provided for low-level allocation strategies
pub mod alloc;
// Primitive types using the heaps above
// Need to conditionally define the mod from `boxed.rs` to avoid
// duplicating the lang-items when building in test cfg; but also need
// to allow code to have `use boxed::Box;` declarations.
#[cfg(not(test))]
pub mod boxed;
#[cfg(test)]
mod boxed {
pub use std::boxed::Box;
}
pub mod borrow;
pub mod collections;
pub mod fmt;
pub mod prelude;
pub mod raw_vec;
pub mod rc;
pub mod slice;
pub mod str;
pub mod string;
#[cfg(target_has_atomic = "ptr")]
pub mod sync;
#[cfg(target_has_atomic = "ptr")]
pub mod task;
#[cfg(test)]
mod tests;
pub mod vec;
#[cfg(not(test))]
mod std {
pub use core::ops; // RangeFull
}
#[doc(hidden)]
#[unstable(feature = "liballoc_internals", issue = "none", reason = "implementation detail")]
pub mod __export {
pub use core::format_args;
}

library/alloc/src/macros.rs

@ -0,0 +1,109 @@
/// Creates a [`Vec`] containing the arguments.
///
/// `vec!` allows `Vec`s to be defined with the same syntax as array expressions.
/// There are two forms of this macro:
///
/// - Create a [`Vec`] containing a given list of elements:
///
/// ```
/// let v = vec![1, 2, 3];
/// assert_eq!(v[0], 1);
/// assert_eq!(v[1], 2);
/// assert_eq!(v[2], 3);
/// ```
///
/// - Create a [`Vec`] from a given element and size:
///
/// ```
/// let v = vec![1; 3];
/// assert_eq!(v, [1, 1, 1]);
/// ```
///
/// Note that unlike array expressions this syntax supports all elements
/// which implement [`Clone`] and the number of elements doesn't have to be
/// a constant.
///
/// This will use `clone` to duplicate an expression, so one should be careful
/// using this with types having a nonstandard `Clone` implementation. For
/// example, `vec![Rc::new(1); 5]` will create a vector of five references
/// to the same boxed integer value, not five references pointing to independently
/// boxed integers.
///
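/// A quick demonstration of that sharing (using `Rc` via `std`):
///
/// ```
/// use std::rc::Rc;
///
/// let v = vec![Rc::new(1); 3];
/// // All three slots are clones of one `Rc`, so they share one allocation.
/// assert_eq!(Rc::strong_count(&v[0]), 3);
/// ```
///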
/// [`Vec`]: crate::vec::Vec
#[cfg(not(test))]
#[macro_export]
#[stable(feature = "rust1", since = "1.0.0")]
#[allow_internal_unstable(box_syntax)]
macro_rules! vec {
() => (
$crate::vec::Vec::new()
);
($elem:expr; $n:expr) => (
$crate::vec::from_elem($elem, $n)
);
($($x:expr),+ $(,)?) => (
<[_]>::into_vec(box [$($x),+])
);
}
// HACK(japaric): with cfg(test) the inherent `[T]::into_vec` method, which is
// required for this macro definition, is not available. Instead use the
// `slice::into_vec` function which is only available with cfg(test)
// NB see the slice::hack module in slice.rs for more information
#[cfg(test)]
macro_rules! vec {
() => (
$crate::vec::Vec::new()
);
($elem:expr; $n:expr) => (
$crate::vec::from_elem($elem, $n)
);
($($x:expr),*) => (
$crate::slice::into_vec(box [$($x),*])
);
($($x:expr,)*) => (vec![$($x),*])
}
/// Creates a `String` using interpolation of runtime expressions.
///
/// The first argument `format!` receives is a format string. This must be a string
/// literal. The power of the formatting string is in the `{}`s contained.
///
/// Additional parameters passed to `format!` replace the `{}`s within the
/// formatting string in the order given unless named or positional parameters
/// are used; see [`std::fmt`][fmt] for more information.
///
/// A common use for `format!` is concatenation and interpolation of strings.
/// The same convention is used with [`print!`] and [`write!`] macros,
/// depending on the intended destination of the string.
///
/// To convert a single value to a string, use the [`to_string`] method. This
/// will use the [`Display`] formatting trait.
///
/// [fmt]: core::fmt
/// [`print!`]: ../std/macro.print.html
/// [`write!`]: core::write
/// [`to_string`]: crate::string::ToString
/// [`Display`]: core::fmt::Display
///
/// # Panics
///
/// `format!` panics if a formatting trait implementation returns an error.
/// This indicates an incorrect implementation
/// since `fmt::Write for String` never returns an error itself.
///
/// # Examples
///
/// ```
/// format!("test");
/// format!("hello {}", "world!");
/// format!("x = {}, y = {y}", 10, y = 30);
/// ```
#[macro_export]
#[stable(feature = "rust1", since = "1.0.0")]
macro_rules! format {
($($arg:tt)*) => {{
let res = $crate::fmt::format($crate::__export::format_args!($($arg)*));
res
}}
}
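A sketch of the `# Panics` note above: a `Display` implementation that returns `Err` (which violates the trait's contract) makes `format!` panic at runtime.

```rust
use std::fmt;

struct Broken;

impl fmt::Display for Broken {
    fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Returning an error here breaks the trait's contract...
        Err(fmt::Error)
    }
}

fn main() {
    // ...so this call panics with "a formatting trait implementation
    // returned an error".
    let _ = format!("{}", Broken);
}
```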

library/alloc/src/raw_vec.rs

@ -1,25 +1,27 @@
#![unstable(feature = "raw_vec_internals", reason = "implementation detail", issue = "none")]
#![doc(hidden)]
use core::alloc::{LayoutErr, MemoryBlock};
use core::alloc::LayoutErr;
use core::cmp;
use core::mem::{self, ManuallyDrop, MaybeUninit};
use core::ops::Drop;
use core::ptr::{NonNull, Unique};
use core::slice;
use crate::alloc::{
handle_alloc_error,
AllocInit::{self, *},
AllocRef, Global, Layout,
ReallocPlacement::{self, *},
};
use crate::alloc::{handle_alloc_error, AllocRef, Global, Layout};
use crate::boxed::Box;
use crate::collections::TryReserveError::{self, *};
#[cfg(test)]
mod tests;
enum AllocInit {
/// The contents of the new memory are uninitialized.
Uninitialized,
/// The new memory is guaranteed to be zeroed.
Zeroed,
}
/// A low-level utility for more ergonomically allocating, reallocating, and deallocating
/// a buffer of memory on the heap without having to worry about all the corner cases
/// involved. This type is excellent for building your own data structures like Vec and VecDeque.
@ -156,14 +158,14 @@ impl<T, A: AllocRef> RawVec<T, A> {
/// allocator for the returned `RawVec`.
#[inline]
pub fn with_capacity_in(capacity: usize, alloc: A) -> Self {
Self::allocate_in(capacity, Uninitialized, alloc)
Self::allocate_in(capacity, AllocInit::Uninitialized, alloc)
}
/// Like `with_capacity_zeroed`, but parameterized over the choice
/// of allocator for the returned `RawVec`.
#[inline]
pub fn with_capacity_zeroed_in(capacity: usize, alloc: A) -> Self {
Self::allocate_in(capacity, Zeroed, alloc)
Self::allocate_in(capacity, AllocInit::Zeroed, alloc)
}
fn allocate_in(capacity: usize, init: AllocInit, mut alloc: A) -> Self {
@ -180,14 +182,18 @@ impl<T, A: AllocRef> RawVec<T, A> {
Ok(_) => {}
Err(_) => capacity_overflow(),
}
let memory = match alloc.alloc(layout, init) {
Ok(memory) => memory,
let result = match init {
AllocInit::Uninitialized => alloc.alloc(layout),
AllocInit::Zeroed => alloc.alloc_zeroed(layout),
};
let ptr = match result {
Ok(ptr) => ptr,
Err(_) => handle_alloc_error(layout),
};
Self {
ptr: unsafe { Unique::new_unchecked(memory.ptr.cast().as_ptr()) },
cap: Self::capacity_from_bytes(memory.size),
ptr: unsafe { Unique::new_unchecked(ptr.cast().as_ptr()) },
cap: Self::capacity_from_bytes(ptr.len()),
alloc,
}
}
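The same `AllocInit` dispatch can be sketched against the stable global-allocator API (the diff above uses the unstable `AllocRef` trait; `std::alloc::alloc`/`alloc_zeroed` are the stable analogues):

```rust
use std::alloc::{alloc, alloc_zeroed, dealloc, handle_alloc_error, Layout};

// Same shape as the private enum in raw_vec.rs above.
enum AllocInit {
    Uninitialized,
    Zeroed,
}

unsafe fn allocate(layout: Layout, init: AllocInit) -> *mut u8 {
    // Dispatch to the matching allocator entry point, as `allocate_in` does.
    let ptr = match init {
        AllocInit::Uninitialized => alloc(layout),
        AllocInit::Zeroed => alloc_zeroed(layout),
    };
    if ptr.is_null() {
        handle_alloc_error(layout);
    }
    ptr
}

fn main() {
    let layout = Layout::array::<u32>(4).unwrap();
    unsafe {
        let p = allocate(layout, AllocInit::Zeroed) as *mut u32;
        assert_eq!(*p, 0); // zeroed memory
        dealloc(p as *mut u8, layout);
    }
}
```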
@ -197,13 +203,15 @@ impl<T, A: AllocRef> RawVec<T, A> {
///
/// # Safety
///
/// The `ptr` must be allocated (via the given allocator `a`), and with the given `capacity`.
/// The `ptr` must be allocated (via the given allocator `alloc`), and with the given
/// `capacity`.
/// The `capacity` cannot exceed `isize::MAX` for sized types (only a concern on 32-bit
/// systems). ZST vectors may have a capacity up to `usize::MAX`.
/// If the `ptr` and `capacity` come from a `RawVec` created via `a`, then this is guaranteed.
/// If the `ptr` and `capacity` come from a `RawVec` created via `alloc`, then this is
/// guaranteed.
#[inline]
pub unsafe fn from_raw_parts_in(ptr: *mut T, capacity: usize, a: A) -> Self {
Self { ptr: unsafe { Unique::new_unchecked(ptr) }, cap: capacity, alloc: a }
pub unsafe fn from_raw_parts_in(ptr: *mut T, capacity: usize, alloc: A) -> Self {
Self { ptr: unsafe { Unique::new_unchecked(ptr) }, cap: capacity, alloc }
}
/// Gets a raw pointer to the start of the allocation. Note that this is
@ -358,7 +366,7 @@ impl<T, A: AllocRef> RawVec<T, A> {
///
/// Aborts on OOM.
pub fn shrink_to_fit(&mut self, amount: usize) {
match self.shrink(amount, MayMove) {
match self.shrink(amount) {
Err(CapacityOverflow) => capacity_overflow(),
Err(AllocError { layout, .. }) => handle_alloc_error(layout),
Ok(()) => { /* yay */ }
@ -378,9 +386,9 @@ impl<T, A: AllocRef> RawVec<T, A> {
excess / mem::size_of::<T>()
}
fn set_memory(&mut self, memory: MemoryBlock) {
self.ptr = unsafe { Unique::new_unchecked(memory.ptr.cast().as_ptr()) };
self.cap = Self::capacity_from_bytes(memory.size);
fn set_ptr(&mut self, ptr: NonNull<[u8]>) {
self.ptr = unsafe { Unique::new_unchecked(ptr.cast().as_ptr()) };
self.cap = Self::capacity_from_bytes(ptr.len());
}
// This method is usually instantiated many times. So we want it to be as
@ -426,8 +434,8 @@ impl<T, A: AllocRef> RawVec<T, A> {
let new_layout = Layout::array::<T>(cap);
// `finish_grow` is non-generic over `T`.
let memory = finish_grow(new_layout, self.current_memory(), &mut self.alloc)?;
self.set_memory(memory);
let ptr = finish_grow(new_layout, self.current_memory(), &mut self.alloc)?;
self.set_ptr(ptr);
Ok(())
}
@ -445,30 +453,24 @@ impl<T, A: AllocRef> RawVec<T, A> {
let new_layout = Layout::array::<T>(cap);
// `finish_grow` is non-generic over `T`.
let memory = finish_grow(new_layout, self.current_memory(), &mut self.alloc)?;
self.set_memory(memory);
let ptr = finish_grow(new_layout, self.current_memory(), &mut self.alloc)?;
self.set_ptr(ptr);
Ok(())
}
fn shrink(
&mut self,
amount: usize,
placement: ReallocPlacement,
) -> Result<(), TryReserveError> {
fn shrink(&mut self, amount: usize) -> Result<(), TryReserveError> {
assert!(amount <= self.capacity(), "Tried to shrink to a larger capacity");
let (ptr, layout) = if let Some(mem) = self.current_memory() { mem } else { return Ok(()) };
let new_size = amount * mem::size_of::<T>();
let memory = unsafe {
self.alloc.shrink(ptr, layout, new_size, placement).map_err(|_| {
TryReserveError::AllocError {
layout: Layout::from_size_align_unchecked(new_size, layout.align()),
non_exhaustive: (),
}
let ptr = unsafe {
self.alloc.shrink(ptr, layout, new_size).map_err(|_| TryReserveError::AllocError {
layout: Layout::from_size_align_unchecked(new_size, layout.align()),
non_exhaustive: (),
})?
};
self.set_memory(memory);
self.set_ptr(ptr);
Ok(())
}
}
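For illustration, `Vec::shrink_to_fit` is the public entry point that bottoms out in the `shrink` path above:

```rust
fn main() {
    let mut v = Vec::with_capacity(100);
    v.extend_from_slice(&[1, 2, 3]);
    assert!(v.capacity() >= 100);
    // Internally this reaches `RawVec::shrink_to_fit`, which calls the
    // allocator's `shrink` and aborts (rather than panics) on OOM.
    v.shrink_to_fit();
    assert!(v.capacity() >= v.len());
}
```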
@ -481,7 +483,7 @@ fn finish_grow<A>(
new_layout: Result<Layout, LayoutErr>,
current_memory: Option<(NonNull<u8>, Layout)>,
alloc: &mut A,
) -> Result<MemoryBlock, TryReserveError>
) -> Result<NonNull<[u8]>, TryReserveError>
where
A: AllocRef,
{
@ -492,9 +494,9 @@ where
let memory = if let Some((ptr, old_layout)) = current_memory {
debug_assert_eq!(old_layout.align(), new_layout.align());
unsafe { alloc.grow(ptr, old_layout, new_layout.size(), MayMove, Uninitialized) }
unsafe { alloc.grow(ptr, old_layout, new_layout.size()) }
} else {
alloc.alloc(new_layout, Uninitialized)
alloc.alloc(new_layout)
}
.map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })?;

library/alloc/src/raw_vec/tests.rs

@ -0,0 +1,78 @@
use super::*;
#[test]
fn allocator_param() {
use crate::alloc::AllocErr;
// Writing a test of integration between third-party
// allocators and `RawVec` is a little tricky because the `RawVec`
// API does not expose fallible allocation methods, so we
// cannot check what happens when the allocator is exhausted
// (beyond detecting a panic).
//
// Instead, this just checks that the `RawVec` methods do at
// least go through the Allocator API when it reserves
// storage.
// A dumb allocator that consumes a fixed amount of fuel
// before allocation attempts start failing.
struct BoundedAlloc {
fuel: usize,
}
unsafe impl AllocRef for BoundedAlloc {
fn alloc(&mut self, layout: Layout) -> Result<NonNull<[u8]>, AllocErr> {
let size = layout.size();
if size > self.fuel {
return Err(AllocErr);
}
match Global.alloc(layout) {
ok @ Ok(_) => {
self.fuel -= size;
ok
}
err @ Err(_) => err,
}
}
unsafe fn dealloc(&mut self, ptr: NonNull<u8>, layout: Layout) {
unsafe { Global.dealloc(ptr, layout) }
}
}
let a = BoundedAlloc { fuel: 500 };
let mut v: RawVec<u8, _> = RawVec::with_capacity_in(50, a);
assert_eq!(v.alloc.fuel, 450);
v.reserve(50, 150); // (causes a realloc, thus using 50 + 150 = 200 units of fuel)
assert_eq!(v.alloc.fuel, 250);
}
#[test]
fn reserve_does_not_overallocate() {
{
let mut v: RawVec<u32> = RawVec::new();
// First, `reserve` allocates like `reserve_exact`.
v.reserve(0, 9);
assert_eq!(9, v.capacity());
}
{
let mut v: RawVec<u32> = RawVec::new();
v.reserve(0, 7);
assert_eq!(7, v.capacity());
// 97 is more than double 7, so `reserve` should work
// like `reserve_exact`.
v.reserve(7, 90);
assert_eq!(97, v.capacity());
}
{
let mut v: RawVec<u32> = RawVec::new();
v.reserve(0, 12);
assert_eq!(12, v.capacity());
v.reserve(12, 3);
// 3 is less than half of 12, so `reserve` must grow
// exponentially. At the time of writing this test the growth
// factor is 2, so the new capacity is 24; however, a growth
// factor of 1.5 would be fine too. Hence the `>= 18` in the assert.
assert!(v.capacity() >= 12 + 12 / 2);
}
}
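The same policy is observable through the stable `Vec::reserve` (a sketch; the exact capacity depends on the growth factor discussed in the test above, only the lower bound is contractual):

```rust
fn main() {
    let mut v: Vec<u32> = Vec::with_capacity(7);
    v.extend_from_slice(&[0; 7]);
    // Needs capacity 10; amortized growth typically jumps to 14 (doubling),
    // but only `capacity >= 10` is guaranteed by `reserve`'s contract.
    v.reserve(3);
    assert!(v.capacity() >= 10);
    println!("capacity after amortized growth: {}", v.capacity());
}
```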

library/alloc/src/rc.rs

@ -214,18 +214,15 @@
//! }
//! ```
//!
//! [`Rc`]: struct.Rc.html
//! [`Weak`]: struct.Weak.html
//! [clone]: ../../std/clone/trait.Clone.html#tymethod.clone
//! [`Cell`]: ../../std/cell/struct.Cell.html
//! [`RefCell`]: ../../std/cell/struct.RefCell.html
//! [send]: ../../std/marker/trait.Send.html
//! [clone]: Clone::clone
//! [`Cell`]: core::cell::Cell
//! [`RefCell`]: core::cell::RefCell
//! [send]: core::marker::Send
//! [arc]: ../../std/sync/struct.Arc.html
//! [`Deref`]: ../../std/ops/trait.Deref.html
//! [downgrade]: struct.Rc.html#method.downgrade
//! [upgrade]: struct.Weak.html#method.upgrade
//! [`None`]: ../../std/option/enum.Option.html#variant.None
//! [mutability]: ../../std/cell/index.html#introducing-mutability-inside-of-something-immutable
//! [`Deref`]: core::ops::Deref
//! [downgrade]: Rc::downgrade
//! [upgrade]: Weak::upgrade
//! [mutability]: core::cell#introducing-mutability-inside-of-something-immutable
#![stable(feature = "rust1", since = "1.0.0")]
@ -235,7 +232,6 @@ use crate::boxed::Box;
use std::boxed::Box;
use core::any::Any;
use core::array::LengthAtMost32;
use core::borrow;
use core::cell::Cell;
use core::cmp::Ordering;
@ -251,7 +247,7 @@ use core::pin::Pin;
use core::ptr::{self, NonNull};
use core::slice::from_raw_parts_mut;
use crate::alloc::{box_free, handle_alloc_error, AllocInit, AllocRef, Global, Layout};
use crate::alloc::{box_free, handle_alloc_error, AllocErr, AllocRef, Global, Layout};
use crate::borrow::{Cow, ToOwned};
use crate::string::String;
use crate::vec::Vec;
@ -353,9 +349,11 @@ impl<T> Rc<T> {
#[unstable(feature = "new_uninit", issue = "63291")]
pub fn new_uninit() -> Rc<mem::MaybeUninit<T>> {
unsafe {
Rc::from_ptr(Rc::allocate_for_layout(Layout::new::<T>(), |mem| {
mem as *mut RcBox<mem::MaybeUninit<T>>
}))
Rc::from_ptr(Rc::allocate_for_layout(
Layout::new::<T>(),
|layout| Global.alloc(layout),
|mem| mem as *mut RcBox<mem::MaybeUninit<T>>,
))
}
}
@ -382,9 +380,11 @@ impl<T> Rc<T> {
#[unstable(feature = "new_uninit", issue = "63291")]
pub fn new_zeroed() -> Rc<mem::MaybeUninit<T>> {
unsafe {
let mut uninit = Self::new_uninit();
ptr::write_bytes::<T>(Rc::get_mut_unchecked(&mut uninit).as_mut_ptr(), 0, 1);
uninit
Rc::from_ptr(Rc::allocate_for_layout(
Layout::new::<T>(),
|layout| Global.alloc_zeroed(layout),
|mem| mem as *mut RcBox<mem::MaybeUninit<T>>,
))
}
}
@ -397,13 +397,11 @@ impl<T> Rc<T> {
/// Returns the inner value, if the `Rc` has exactly one strong reference.
///
/// Otherwise, an [`Err`][result] is returned with the same `Rc` that was
/// Otherwise, an [`Err`] is returned with the same `Rc` that was
/// passed in.
///
/// This will succeed even if there are outstanding weak references.
///
/// [result]: ../../std/result/enum.Result.html
///
/// # Examples
///
/// ```
@ -466,6 +464,40 @@ impl<T> Rc<[T]> {
pub fn new_uninit_slice(len: usize) -> Rc<[mem::MaybeUninit<T>]> {
unsafe { Rc::from_ptr(Rc::allocate_for_slice(len)) }
}
/// Constructs a new reference-counted slice with uninitialized contents, with the memory being
/// filled with `0` bytes.
///
/// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and
/// incorrect usage of this method.
///
/// # Examples
///
/// ```
/// #![feature(new_uninit)]
///
/// use std::rc::Rc;
///
/// let values = Rc::<[u32]>::new_zeroed_slice(3);
/// let values = unsafe { values.assume_init() };
///
/// assert_eq!(*values, [0, 0, 0])
/// ```
///
/// [zeroed]: ../../std/mem/union.MaybeUninit.html#method.zeroed
#[unstable(feature = "new_uninit", issue = "63291")]
pub fn new_zeroed_slice(len: usize) -> Rc<[mem::MaybeUninit<T>]> {
unsafe {
Rc::from_ptr(Rc::allocate_for_layout(
Layout::array::<T>(len).unwrap(),
|layout| Global.alloc_zeroed(layout),
|mem| {
ptr::slice_from_raw_parts_mut(mem as *mut T, len)
as *mut RcBox<[mem::MaybeUninit<T>]>
},
))
}
}
}
impl<T> Rc<mem::MaybeUninit<T>> {
@ -554,7 +586,7 @@ impl<T: ?Sized> Rc<T> {
/// To avoid a memory leak the pointer must be converted back to an `Rc` using
/// [`Rc::from_raw`][from_raw].
///
/// [from_raw]: struct.Rc.html#method.from_raw
/// [from_raw]: Rc::from_raw
///
/// # Examples
///
@ -614,8 +646,8 @@ impl<T: ?Sized> Rc<T> {
/// This function is unsafe because improper use may lead to memory unsafety,
/// even if the returned `Rc<T>` is never accessed.
///
/// [into_raw]: struct.Rc.html#method.into_raw
/// [transmute]: ../../std/mem/fn.transmute.html
/// [into_raw]: Rc::into_raw
/// [transmute]: core::mem::transmute
///
/// # Examples
///
@ -646,32 +678,7 @@ impl<T: ?Sized> Rc<T> {
unsafe { Self::from_ptr(rc_ptr) }
}
/// Consumes the `Rc`, returning the wrapped pointer as `NonNull<T>`.
///
/// # Examples
///
/// ```
/// #![feature(rc_into_raw_non_null)]
/// #![allow(deprecated)]
///
/// use std::rc::Rc;
///
/// let x = Rc::new("hello".to_owned());
/// let ptr = Rc::into_raw_non_null(x);
/// let deref = unsafe { ptr.as_ref() };
/// assert_eq!(deref, "hello");
/// ```
#[unstable(feature = "rc_into_raw_non_null", issue = "47336")]
#[rustc_deprecated(since = "1.44.0", reason = "use `Rc::into_raw` instead")]
#[inline]
pub fn into_raw_non_null(this: Self) -> NonNull<T> {
// safe because Rc guarantees its pointer is non-null
unsafe { NonNull::new_unchecked(Rc::into_raw(this) as *mut _) }
}
/// Creates a new [`Weak`][weak] pointer to this allocation.
///
/// [weak]: struct.Weak.html
/// Creates a new [`Weak`] pointer to this allocation.
///
/// # Examples
///
@ -690,9 +697,7 @@ impl<T: ?Sized> Rc<T> {
Weak { ptr: this.ptr }
}
/// Gets the number of [`Weak`][weak] pointers to this allocation.
///
/// [weak]: struct.Weak.html
/// Gets the number of [`Weak`] pointers to this allocation.
///
/// # Examples
///
@ -728,17 +733,15 @@ impl<T: ?Sized> Rc<T> {
this.strong()
}
/// Returns `true` if there are no other `Rc` or [`Weak`][weak] pointers to
/// Returns `true` if there are no other `Rc` or [`Weak`] pointers to
/// this allocation.
///
/// [weak]: struct.Weak.html
#[inline]
fn is_unique(this: &Self) -> bool {
Rc::weak_count(this) == 0 && Rc::strong_count(this) == 1
}
/// Returns a mutable reference into the given `Rc`, if there are
/// no other `Rc` or [`Weak`][weak] pointers to the same allocation.
/// no other `Rc` or [`Weak`] pointers to the same allocation.
///
/// Returns [`None`] otherwise, because it is not safe to
/// mutate a shared value.
@ -746,10 +749,8 @@ impl<T: ?Sized> Rc<T> {
/// See also [`make_mut`][make_mut], which will [`clone`][clone]
/// the inner value when there are other pointers.
///
/// [weak]: struct.Weak.html
/// [`None`]: ../../std/option/enum.Option.html#variant.None
/// [make_mut]: struct.Rc.html#method.make_mut
/// [clone]: ../../std/clone/trait.Clone.html#tymethod.clone
/// [make_mut]: Rc::make_mut
/// [clone]: Clone::clone
///
/// # Examples
///
@ -774,7 +775,7 @@ impl<T: ?Sized> Rc<T> {
///
/// See also [`get_mut`], which is safe and does appropriate checks.
///
/// [`get_mut`]: struct.Rc.html#method.get_mut
/// [`get_mut`]: Rc::get_mut
///
/// # Safety
///
@ -820,7 +821,7 @@ impl<T: ?Sized> Rc<T> {
/// assert!(!Rc::ptr_eq(&five, &other_five));
/// ```
///
/// [`ptr::eq`]: ../../std/ptr/fn.eq.html
/// [`ptr::eq`]: core::ptr::eq
pub fn ptr_eq(this: &Self, other: &Self) -> bool {
this.ptr.as_ptr() == other.ptr.as_ptr()
}
@ -838,9 +839,8 @@ impl<T: Clone> Rc<T> {
///
/// See also [`get_mut`], which will fail rather than cloning.
///
/// [`Weak`]: struct.Weak.html
/// [`clone`]: ../../std/clone/trait.Clone.html#tymethod.clone
/// [`get_mut`]: struct.Rc.html#method.get_mut
/// [`clone`]: Clone::clone
/// [`get_mut`]: Rc::get_mut
///
/// # Examples
///
@ -943,6 +943,7 @@ impl<T: ?Sized> Rc<T> {
/// and must return a (potentially fat) pointer to the `RcBox<T>`.
unsafe fn allocate_for_layout(
value_layout: Layout,
allocate: impl FnOnce(Layout) -> Result<NonNull<[u8]>, AllocErr>,
mem_to_rcbox: impl FnOnce(*mut u8) -> *mut RcBox<T>,
) -> *mut RcBox<T> {
// Calculate layout using the given value layout.
@ -952,12 +953,10 @@ impl<T: ?Sized> Rc<T> {
let layout = Layout::new::<RcBox<()>>().extend(value_layout).unwrap().0.pad_to_align();
// Allocate for the layout.
let mem = Global
.alloc(layout, AllocInit::Uninitialized)
.unwrap_or_else(|_| handle_alloc_error(layout));
let ptr = allocate(layout).unwrap_or_else(|_| handle_alloc_error(layout));
// Initialize the RcBox
let inner = mem_to_rcbox(mem.ptr.as_ptr());
let inner = mem_to_rcbox(ptr.as_non_null_ptr().as_ptr());
unsafe {
debug_assert_eq!(Layout::for_value(&*inner), layout);
@ -972,9 +971,11 @@ impl<T: ?Sized> Rc<T> {
unsafe fn allocate_for_ptr(ptr: *const T) -> *mut RcBox<T> {
// Allocate for the `RcBox<T>` using the given value.
unsafe {
Self::allocate_for_layout(Layout::for_value(&*ptr), |mem| {
set_data_ptr(ptr as *mut T, mem) as *mut RcBox<T>
})
Self::allocate_for_layout(
Layout::for_value(&*ptr),
|layout| Global.alloc(layout),
|mem| set_data_ptr(ptr as *mut T, mem) as *mut RcBox<T>,
)
}
}
@ -1005,9 +1006,11 @@ impl<T> Rc<[T]> {
/// Allocates an `RcBox<[T]>` with the given length.
unsafe fn allocate_for_slice(len: usize) -> *mut RcBox<[T]> {
unsafe {
Self::allocate_for_layout(Layout::array::<T>(len).unwrap(), |mem| {
ptr::slice_from_raw_parts_mut(mem as *mut T, len) as *mut RcBox<[T]>
})
Self::allocate_for_layout(
Layout::array::<T>(len).unwrap(),
|layout| Global.alloc(layout),
|mem| ptr::slice_from_raw_parts_mut(mem as *mut T, len) as *mut RcBox<[T]>,
)
}
}
}
@ -1143,8 +1146,6 @@ unsafe impl<#[may_dangle] T: ?Sized> Drop for Rc<T> {
/// drop(foo); // Doesn't print anything
/// drop(foo2); // Prints "dropped!"
/// ```
///
/// [`Weak`]: ../../std/rc/struct.Weak.html
fn drop(&mut self) {
unsafe {
self.dec_strong();
@ -1516,10 +1517,7 @@ where
}
#[stable(feature = "boxed_slice_try_from", since = "1.43.0")]
impl<T, const N: usize> TryFrom<Rc<[T]>> for Rc<[T; N]>
where
[T; N]: LengthAtMost32,
{
impl<T, const N: usize> TryFrom<Rc<[T]>> for Rc<[T; N]> {
type Error = Rc<[T]>;
fn try_from(boxed_slice: Rc<[T]>) -> Result<Self, Self::Error> {
@ -1629,11 +1627,7 @@ impl<T, I: iter::TrustedLen<Item = T>> ToRcSlice<T> for I {
///
/// The typical way to obtain a `Weak` pointer is to call [`Rc::downgrade`].
///
/// [`Rc`]: struct.Rc.html
/// [`Rc::downgrade`]: struct.Rc.html#method.downgrade
/// [`upgrade`]: struct.Weak.html#method.upgrade
/// [`Option`]: ../../std/option/enum.Option.html
/// [`None`]: ../../std/option/enum.Option.html#variant.None
/// [`upgrade`]: Weak::upgrade
#[stable(feature = "rc_weak", since = "1.4.0")]
pub struct Weak<T: ?Sized> {
// This is a `NonNull` to allow optimizing the size of this type in enums,
@ -1660,8 +1654,7 @@ impl<T> Weak<T> {
/// Constructs a new `Weak<T>`, without allocating any memory.
/// Calling [`upgrade`] on the return value always gives [`None`].
///
/// [`upgrade`]: #method.upgrade
/// [`None`]: ../../std/option/enum.Option.html
/// [`upgrade`]: Weak::upgrade
///
/// # Examples
///
@ -1700,7 +1693,7 @@ impl<T> Weak<T> {
/// // assert_eq!("hello", unsafe { &*weak.as_ptr() });
/// ```
///
/// [`null`]: ../../std/ptr/fn.null.html
/// [`null`]: core::ptr::null
#[stable(feature = "rc_as_ptr", since = "1.45.0")]
pub fn as_ptr(&self) -> *const T {
let ptr: *mut RcBox<T> = NonNull::as_ptr(self.ptr);
@ -1719,8 +1712,9 @@ impl<T> Weak<T> {
/// Consumes the `Weak<T>` and turns it into a raw pointer.
///
/// This converts the weak pointer into a raw pointer, preserving the original weak count. It
/// can be turned back into the `Weak<T>` with [`from_raw`].
/// This converts the weak pointer into a raw pointer, while still preserving the ownership of
/// one weak reference (the weak count is not modified by this operation). It can be turned
/// back into the `Weak<T>` with [`from_raw`].
///
/// The same restrictions of accessing the target of the pointer as with
/// [`as_ptr`] apply.
@ -1741,8 +1735,8 @@ impl<T> Weak<T> {
/// assert_eq!(0, Rc::weak_count(&strong));
/// ```
///
/// [`from_raw`]: struct.Weak.html#method.from_raw
/// [`as_ptr`]: struct.Weak.html#method.as_ptr
/// [`from_raw`]: Weak::from_raw
/// [`as_ptr`]: Weak::as_ptr
#[stable(feature = "weak_into_raw", since = "1.45.0")]
pub fn into_raw(self) -> *const T {
let result = self.as_ptr();
@ -1755,17 +1749,18 @@ impl<T> Weak<T> {
/// This can be used to safely get a strong reference (by calling [`upgrade`]
/// later) or to deallocate the weak count by dropping the `Weak<T>`.
///
/// It takes ownership of one weak count (with the exception of pointers created by [`new`],
/// as these don't have any corresponding weak count).
/// It takes ownership of one weak reference (with the exception of pointers created by [`new`],
/// as these don't own anything; the method still works on them).
///
/// # Safety
///
/// The pointer must have originated from the [`into_raw`] and must still own its potential
/// weak reference count.
/// The pointer must have originated from the [`into_raw`] and must still own its potential
/// weak reference.
///
/// It is allowed for the strong count to be 0 at the time of calling this, but the weak count
/// must be non-zero or the pointer must have originated from a dangling `Weak<T>` (one created
/// by [`new`]).
/// It is allowed for the strong count to be 0 at the time of calling this. Nevertheless, this
/// takes ownership of one weak reference currently represented as a raw pointer (the weak
/// count is not modified by this operation) and therefore it must be paired with a previous
/// call to [`into_raw`].
///
/// # Examples
///
@ -1788,12 +1783,9 @@ impl<T> Weak<T> {
/// assert!(unsafe { Weak::from_raw(raw_2) }.upgrade().is_none());
/// ```
///
/// [`into_raw`]: struct.Weak.html#method.into_raw
/// [`upgrade`]: struct.Weak.html#method.upgrade
/// [`Rc`]: struct.Rc.html
/// [`Weak`]: struct.Weak.html
/// [`new`]: struct.Weak.html#method.new
/// [`forget`]: ../../std/mem/fn.forget.html
/// [`into_raw`]: Weak::into_raw
/// [`upgrade`]: Weak::upgrade
/// [`new`]: Weak::new
#[stable(feature = "weak_into_raw", since = "1.45.0")]
pub unsafe fn from_raw(ptr: *const T) -> Self {
if ptr.is_null() {
@ -1821,9 +1813,6 @@ impl<T: ?Sized> Weak<T> {
///
/// Returns [`None`] if the inner value has since been dropped.
///
/// [`Rc`]: struct.Rc.html
/// [`None`]: ../../std/option/enum.Option.html
///
/// # Examples
///
/// ```
@ -1856,8 +1845,6 @@ impl<T: ?Sized> Weak<T> {
/// Gets the number of strong (`Rc`) pointers pointing to this allocation.
///
/// If `self` was created using [`Weak::new`], this will return 0.
///
/// [`Weak::new`]: #method.new
#[stable(feature = "weak_counts", since = "1.41.0")]
pub fn strong_count(&self) -> usize {
if let Some(inner) = self.inner() { inner.strong() } else { 0 }
@ -1926,7 +1913,7 @@ impl<T: ?Sized> Weak<T> {
/// assert!(!first.ptr_eq(&third));
/// ```
///
/// [`ptr::eq`]: ../../std/ptr/fn.eq.html
/// [`ptr::eq`]: core::ptr::eq
#[inline]
#[stable(feature = "weak_ptr_eq", since = "1.39.0")]
pub fn ptr_eq(&self, other: &Self) -> bool {
@ -2008,8 +1995,8 @@ impl<T> Default for Weak<T> {
/// Constructs a new `Weak<T>`, allocating memory for `T` without initializing
/// it. Calling [`upgrade`] on the return value always gives [`None`].
///
/// [`None`]: ../../std/option/enum.Option.html
/// [`upgrade`]: ../../std/rc/struct.Weak.html#method.upgrade
/// [`None`]: Option
/// [`upgrade`]: Weak::upgrade
///
/// # Examples
///
@ -2117,7 +2104,7 @@ impl<T: ?Sized> AsRef<T> for Rc<T> {
#[stable(feature = "pin", since = "1.33.0")]
impl<T: ?Sized> Unpin for Rc<T> {}
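The weak-count ownership rules spelled out for `into_raw`/`from_raw` above can be exercised with a short round trip (mirroring the doc examples):

```rust
use std::rc::{Rc, Weak};

fn main() {
    let strong = Rc::new("hello".to_owned());
    let weak = Rc::downgrade(&strong);
    // `into_raw` leaves the weak count untouched; the raw pointer now
    // owns that one weak reference.
    let raw = weak.into_raw();
    assert_eq!(1, Rc::weak_count(&strong));
    // `from_raw` pairs with `into_raw`, taking the ownership back.
    let weak: Weak<String> = unsafe { Weak::from_raw(raw) };
    assert_eq!(&*weak.upgrade().unwrap(), "hello");
}
```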
/// Get the offset within an `ArcInner` for
/// Get the offset within an `RcBox` for
/// a payload of type described by a pointer.
///
/// # Safety
@ -2126,7 +2113,7 @@ impl<T: ?Sized> Unpin for Rc<T> {}
///
/// - This function is safe for any argument if `T` is sized, and
/// - if `T` is unsized, the pointer must have appropriate pointer metadata
/// aquired from the real instance that you are getting this offset for.
/// acquired from the real instance that you are getting this offset for.
unsafe fn data_offset<T: ?Sized>(ptr: *const T) -> isize {
// Align the unsized value to the end of the `RcBox`.
// Because it is ?Sized, it will always be the last field in memory.

library/alloc/src/slice.rs
File diff suppressed because it is too large

library/alloc/src/str.rs

@ -0,0 +1,576 @@
//! Unicode string slices.
//!
//! *[See also the `str` primitive type](../../std/primitive.str.html).*
//!
//! The `&str` type is one of the two main string types, the other being `String`.
//! Unlike its `String` counterpart, its contents are borrowed.
//!
//! # Basic Usage
//!
//! A basic string declaration of `&str` type:
//!
//! ```
//! let hello_world = "Hello, World!";
//! ```
//!
//! Here we have declared a string literal, also known as a string slice.
//! String literals have a static lifetime, which means the string `hello_world`
//! is guaranteed to be valid for the duration of the entire program.
//! We can explicitly specify `hello_world`'s lifetime as well:
//!
//! ```
//! let hello_world: &'static str = "Hello, world!";
//! ```
#![stable(feature = "rust1", since = "1.0.0")]
// Many of the imports in this module are only used in the test configuration.
// It's cleaner to just turn off the unused_imports warning than to fix them.
#![allow(unused_imports)]
use core::borrow::{Borrow, BorrowMut};
use core::iter::FusedIterator;
use core::mem;
use core::ptr;
use core::str::pattern::{DoubleEndedSearcher, Pattern, ReverseSearcher, Searcher};
use core::unicode::conversions;
use crate::borrow::ToOwned;
use crate::boxed::Box;
use crate::slice::{Concat, Join, SliceIndex};
use crate::string::String;
use crate::vec::Vec;
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::str::pattern;
#[stable(feature = "encode_utf16", since = "1.8.0")]
pub use core::str::EncodeUtf16;
#[stable(feature = "split_ascii_whitespace", since = "1.34.0")]
pub use core::str::SplitAsciiWhitespace;
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::str::SplitWhitespace;
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::str::{from_utf8, from_utf8_mut, Bytes, CharIndices, Chars};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::str::{from_utf8_unchecked, from_utf8_unchecked_mut, ParseBoolError};
#[stable(feature = "str_escape", since = "1.34.0")]
pub use core::str::{EscapeDebug, EscapeDefault, EscapeUnicode};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::str::{FromStr, Utf8Error};
#[allow(deprecated)]
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::str::{Lines, LinesAny};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::str::{MatchIndices, RMatchIndices};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::str::{Matches, RMatches};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::str::{RSplit, Split};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::str::{RSplitN, SplitN};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::str::{RSplitTerminator, SplitTerminator};
/// Note: `str` in `Concat<str>` is not meaningful here.
/// This type parameter of the trait only exists to enable another impl.
#[unstable(feature = "slice_concat_ext", issue = "27747")]
impl<S: Borrow<str>> Concat<str> for [S] {
type Output = String;
fn concat(slice: &Self) -> String {
Join::join(slice, "")
}
}
#[unstable(feature = "slice_concat_ext", issue = "27747")]
impl<S: Borrow<str>> Join<&str> for [S] {
type Output = String;
fn join(slice: &Self, sep: &str) -> String {
unsafe { String::from_utf8_unchecked(join_generic_copy(slice, sep.as_bytes())) }
}
}
macro_rules! spezialize_for_lengths {
($separator:expr, $target:expr, $iter:expr; $($num:expr),*) => {
let mut target = $target;
let iter = $iter;
let sep_bytes = $separator;
match $separator.len() {
$(
// loops with hardcoded sizes run much faster
// specialize the cases with small separator lengths
$num => {
for s in iter {
copy_slice_and_advance!(target, sep_bytes);
copy_slice_and_advance!(target, s.borrow().as_ref());
}
},
)*
_ => {
// arbitrary non-zero size fallback
for s in iter {
copy_slice_and_advance!(target, sep_bytes);
copy_slice_and_advance!(target, s.borrow().as_ref());
}
}
}
};
}
macro_rules! copy_slice_and_advance {
($target:expr, $bytes:expr) => {
let len = $bytes.len();
let (head, tail) = { $target }.split_at_mut(len);
head.copy_from_slice($bytes);
$target = tail;
};
}
// Optimized join implementation that works for both Vec<T> (T: Copy) and String's inner vec
// Currently (2018-05-13) there is a bug with type inference and specialization (see issue #36262)
// For this reason SliceConcat<T> is not specialized for T: Copy and SliceConcat<str> is the
// only user of this function. It is left in place for the time when that is fixed.
//
// the bounds for String-join are S: Borrow<str> and for Vec-join Borrow<[T]>
// [T] and str both impl AsRef<[T]> for some T
// => s.borrow().as_ref() and we always have slices
fn join_generic_copy<B, T, S>(slice: &[S], sep: &[T]) -> Vec<T>
where
T: Copy,
B: AsRef<[T]> + ?Sized,
S: Borrow<B>,
{
let sep_len = sep.len();
let mut iter = slice.iter();
// the first slice is the only one without a separator preceding it
let first = match iter.next() {
Some(first) => first,
None => return vec![],
};
// compute the exact total length of the joined Vec
// if the `len` calculation overflows, we'll panic
// we would have run out of memory anyway and the rest of the function requires
// the entire Vec pre-allocated for safety
let len = sep_len
.checked_mul(iter.len())
.and_then(|n| {
slice.iter().map(|s| s.borrow().as_ref().len()).try_fold(n, usize::checked_add)
})
.expect("attempt to join into collection with len > usize::MAX");
// crucial for safety
let mut result = Vec::with_capacity(len);
assert!(result.capacity() >= len);
result.extend_from_slice(first.borrow().as_ref());
unsafe {
{
let pos = result.len();
let target = result.get_unchecked_mut(pos..len);
// copy separator and slices over without bounds checks
// generate loops with hardcoded offsets for small separators
// massive improvements possible (~ x2)
spezialize_for_lengths!(sep, target, iter; 0, 1, 2, 3, 4);
}
result.set_len(len);
}
result
}
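In practice this copy loop is what `join` and `concat` on slices of string slices run through (`concat` is `join` with an empty separator, per the impls above):

```rust
fn main() {
    let parts = ["alpha", "beta", "gamma"];
    // `join` pre-computes the exact output length, allocates once,
    // then copies separators and slices without bounds checks.
    assert_eq!(parts.join(", "), "alpha, beta, gamma");
    // `concat` delegates to `join` with "" as the separator.
    assert_eq!(parts.concat(), "alphabetagamma");
}
```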
#[stable(feature = "rust1", since = "1.0.0")]
impl Borrow<str> for String {
#[inline]
fn borrow(&self) -> &str {
&self[..]
}
}
#[stable(feature = "string_borrow_mut", since = "1.36.0")]
impl BorrowMut<str> for String {
#[inline]
fn borrow_mut(&mut self) -> &mut str {
&mut self[..]
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl ToOwned for str {
type Owned = String;
#[inline]
fn to_owned(&self) -> String {
unsafe { String::from_utf8_unchecked(self.as_bytes().to_owned()) }
}
fn clone_into(&self, target: &mut String) {
let mut b = mem::take(target).into_bytes();
self.as_bytes().clone_into(&mut b);
*target = unsafe { String::from_utf8_unchecked(b) }
}
}
/// Methods for string slices.
#[lang = "str_alloc"]
#[cfg(not(test))]
impl str {
/// Converts a `Box<str>` into a `Box<[u8]>` without copying or allocating.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let s = "this is a string";
/// let boxed_str = s.to_owned().into_boxed_str();
/// let boxed_bytes = boxed_str.into_boxed_bytes();
/// assert_eq!(*boxed_bytes, *s.as_bytes());
/// ```
#[stable(feature = "str_box_extras", since = "1.20.0")]
#[inline]
pub fn into_boxed_bytes(self: Box<str>) -> Box<[u8]> {
self.into()
}
/// Replaces all matches of a pattern with another string.
///
/// `replace` creates a new [`String`], and copies the data from this string slice into it.
/// While doing so, it attempts to find matches of a pattern. If it finds any, it
/// replaces them with the replacement string slice.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let s = "this is old";
///
/// assert_eq!("this is new", s.replace("old", "new"));
/// ```
///
/// When the pattern doesn't match:
///
/// ```
/// let s = "this is old";
/// assert_eq!(s, s.replace("cookie monster", "little lamb"));
/// ```
#[must_use = "this returns the replaced string as a new allocation, \
without modifying the original"]
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn replace<'a, P: Pattern<'a>>(&'a self, from: P, to: &str) -> String {
let mut result = String::new();
let mut last_end = 0;
for (start, part) in self.match_indices(from) {
result.push_str(unsafe { self.get_unchecked(last_end..start) });
result.push_str(to);
last_end = start + part.len();
}
result.push_str(unsafe { self.get_unchecked(last_end..self.len()) });
result
}
/// Replaces first N matches of a pattern with another string.
///
/// `replacen` creates a new [`String`], and copies the data from this string slice into it.
/// While doing so, it attempts to find matches of a pattern. If it finds any, it
/// replaces them with the replacement string slice at most `count` times.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let s = "foo foo 123 foo";
/// assert_eq!("new new 123 foo", s.replacen("foo", "new", 2));
/// assert_eq!("faa fao 123 foo", s.replacen('o', "a", 3));
/// assert_eq!("foo foo new23 foo", s.replacen(char::is_numeric, "new", 1));
/// ```
///
/// When the pattern doesn't match:
///
/// ```
/// let s = "this is old";
/// assert_eq!(s, s.replacen("cookie monster", "little lamb", 10));
/// ```
#[must_use = "this returns the replaced string as a new allocation, \
without modifying the original"]
#[stable(feature = "str_replacen", since = "1.16.0")]
pub fn replacen<'a, P: Pattern<'a>>(&'a self, pat: P, to: &str, count: usize) -> String {
// Try to reduce the number of re-allocations
let mut result = String::with_capacity(32);
let mut last_end = 0;
for (start, part) in self.match_indices(pat).take(count) {
result.push_str(unsafe { self.get_unchecked(last_end..start) });
result.push_str(to);
last_end = start + part.len();
}
result.push_str(unsafe { self.get_unchecked(last_end..self.len()) });
result
}
/// Returns the lowercase equivalent of this string slice, as a new [`String`].
///
/// 'Lowercase' is defined according to the terms of the Unicode Derived Core Property
/// `Lowercase`.
///
/// Since some characters can expand into multiple characters when changing
/// the case, this function returns a [`String`] instead of modifying the
/// parameter in-place.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let s = "HELLO";
///
/// assert_eq!("hello", s.to_lowercase());
/// ```
///
/// A tricky example, with sigma:
///
/// ```
/// let sigma = "Σ";
///
/// assert_eq!("σ", sigma.to_lowercase());
///
/// // but at the end of a word, it's ς, not σ:
/// let odysseus = "ὈΔΥΣΣΕΎΣ";
///
/// assert_eq!("ὀδυσσεύς", odysseus.to_lowercase());
/// ```
///
/// Languages without case are not changed:
///
/// ```
/// let new_year = "农历新年";
///
/// assert_eq!(new_year, new_year.to_lowercase());
/// ```
#[stable(feature = "unicode_case_mapping", since = "1.2.0")]
pub fn to_lowercase(&self) -> String {
let mut s = String::with_capacity(self.len());
for (i, c) in self[..].char_indices() {
if c == 'Σ' {
// Σ maps to σ, except at the end of a word where it maps to ς.
// This is the only conditional (contextual) but language-independent mapping
// in `SpecialCasing.txt`,
// so hard-code it rather than have a generic "condition" mechanism.
// See https://github.com/rust-lang/rust/issues/26035
map_uppercase_sigma(self, i, &mut s)
} else {
match conversions::to_lower(c) {
[a, '\0', _] => s.push(a),
[a, b, '\0'] => {
s.push(a);
s.push(b);
}
[a, b, c] => {
s.push(a);
s.push(b);
s.push(c);
}
}
}
}
return s;
fn map_uppercase_sigma(from: &str, i: usize, to: &mut String) {
// See http://www.unicode.org/versions/Unicode7.0.0/ch03.pdf#G33992
// for the definition of `Final_Sigma`.
debug_assert!('Σ'.len_utf8() == 2);
let is_word_final = case_ignoreable_then_cased(from[..i].chars().rev())
&& !case_ignoreable_then_cased(from[i + 2..].chars());
to.push_str(if is_word_final { "ς" } else { "σ" });
}
fn case_ignoreable_then_cased<I: Iterator<Item = char>>(iter: I) -> bool {
use core::unicode::derived_property::{Case_Ignorable, Cased};
match iter.skip_while(|&c| Case_Ignorable(c)).next() {
Some(c) => Cased(c),
None => false,
}
}
}
/// Returns the uppercase equivalent of this string slice, as a new [`String`].
///
/// 'Uppercase' is defined according to the terms of the Unicode Derived Core Property
/// `Uppercase`.
///
/// Since some characters can expand into multiple characters when changing
/// the case, this function returns a [`String`] instead of modifying the
/// parameter in-place.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let s = "hello";
///
/// assert_eq!("HELLO", s.to_uppercase());
/// ```
///
/// Scripts without case are not changed:
///
/// ```
/// let new_year = "农历新年";
///
/// assert_eq!(new_year, new_year.to_uppercase());
/// ```
///
/// One character can become multiple:
/// ```
/// let s = "tschüß";
///
/// assert_eq!("TSCHÜSS", s.to_uppercase());
/// ```
#[stable(feature = "unicode_case_mapping", since = "1.2.0")]
pub fn to_uppercase(&self) -> String {
let mut s = String::with_capacity(self.len());
for c in self[..].chars() {
match conversions::to_upper(c) {
[a, '\0', _] => s.push(a),
[a, b, '\0'] => {
s.push(a);
s.push(b);
}
[a, b, c] => {
s.push(a);
s.push(b);
s.push(c);
}
}
}
s
}
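Two cases exercising the multi-character arms of the conversion loops above (both taken from the surrounding docs):

```rust
fn main() {
    // 'ß' uppercases to "SS": one scalar value becomes two.
    assert_eq!("tschüß".to_uppercase(), "TSCHÜSS");
    // The hard-coded sigma rule: word-final 'Σ' lowercases to 'ς'.
    assert_eq!("ὈΔΥΣΣΕΎΣ".to_lowercase(), "ὀδυσσεύς");
}
```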
/// Converts a [`Box<str>`] into a [`String`] without copying or allocating.
///
/// [`Box<str>`]: Box
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let string = String::from("birthday gift");
/// let boxed_str = string.clone().into_boxed_str();
///
/// assert_eq!(boxed_str.into_string(), string);
/// ```
#[stable(feature = "box_str", since = "1.4.0")]
#[inline]
pub fn into_string(self: Box<str>) -> String {
let slice = Box::<[u8]>::from(self);
unsafe { String::from_utf8_unchecked(slice.into_vec()) }
}
/// Creates a new [`String`] by repeating a string `n` times.
///
/// # Panics
///
/// This function will panic if the capacity would overflow.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// assert_eq!("abc".repeat(4), String::from("abcabcabcabc"));
/// ```
///
/// A panic upon overflow:
///
/// ```should_panic
/// // this will panic at runtime
/// "0123456789abcdef".repeat(usize::MAX);
/// ```
#[stable(feature = "repeat_str", since = "1.16.0")]
pub fn repeat(&self, n: usize) -> String {
unsafe { String::from_utf8_unchecked(self.as_bytes().repeat(n)) }
}
/// Returns a copy of this string where each character is mapped to its
/// ASCII upper case equivalent.
///
/// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z',
/// but non-ASCII letters are unchanged.
///
/// To uppercase the value in-place, use [`make_ascii_uppercase`].
///
/// To uppercase ASCII characters in addition to non-ASCII characters, use
/// [`to_uppercase`].
///
/// # Examples
///
/// ```
/// let s = "Grüße, Jürgen ❤";
///
/// assert_eq!("GRüßE, JüRGEN ❤", s.to_ascii_uppercase());
/// ```
///
/// [`make_ascii_uppercase`]: str::make_ascii_uppercase
/// [`to_uppercase`]: #method.to_uppercase
#[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
#[inline]
pub fn to_ascii_uppercase(&self) -> String {
let mut bytes = self.as_bytes().to_vec();
bytes.make_ascii_uppercase();
// make_ascii_uppercase() preserves the UTF-8 invariant.
unsafe { String::from_utf8_unchecked(bytes) }
}
/// Returns a copy of this string where each character is mapped to its
/// ASCII lower case equivalent.
///
/// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z',
/// but non-ASCII letters are unchanged.
///
/// To lowercase the value in-place, use [`make_ascii_lowercase`].
///
/// To lowercase ASCII characters in addition to non-ASCII characters, use
/// [`to_lowercase`].
///
/// # Examples
///
/// ```
/// let s = "Grüße, Jürgen ❤";
///
/// assert_eq!("grüße, jürgen ❤", s.to_ascii_lowercase());
/// ```
///
/// [`make_ascii_lowercase`]: str::make_ascii_lowercase
/// [`to_lowercase`]: #method.to_lowercase
#[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
#[inline]
pub fn to_ascii_lowercase(&self) -> String {
let mut bytes = self.as_bytes().to_vec();
bytes.make_ascii_lowercase();
// make_ascii_lowercase() preserves the UTF-8 invariant.
unsafe { String::from_utf8_unchecked(bytes) }
}
}
/// Converts a boxed slice of bytes to a boxed string slice without checking
/// that the string contains valid UTF-8.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let smile_utf8 = Box::new([226, 152, 186]);
/// let smile = unsafe { std::str::from_boxed_utf8_unchecked(smile_utf8) };
///
/// assert_eq!("☺", &*smile);
/// ```
#[stable(feature = "str_box_extras", since = "1.20.0")]
#[inline]
pub unsafe fn from_boxed_utf8_unchecked(v: Box<[u8]>) -> Box<str> {
unsafe { Box::from_raw(Box::into_raw(v) as *mut str) }
}

library/alloc/src/string.rs

@ -4,8 +4,6 @@
//! [`ToString`]s, and several error types that may result from working with
//! [`String`]s.
//!
//! [`ToString`]: trait.ToString.html
//!
//! # Examples
//!
//! There are multiple ways to create a new [`String`] from a string literal:
@ -20,8 +18,6 @@
//! You can create a new [`String`] from an existing one by concatenating with
//! `+`:
//!
//! [`String`]: struct.String.html
//!
//! ```
//! let s = "Hello".to_string();
//!
@ -67,11 +63,11 @@ use crate::vec::Vec;
/// contents of the string. It has a close relationship with its borrowed
/// counterpart, the primitive [`str`].
///
/// [`str`]: ../../std/primitive.str.html
///
/// # Examples
///
/// You can create a `String` from a literal string with [`String::from`]:
/// You can create a `String` from [a literal string][`str`] with [`String::from`]:
///
/// [`String::from`]: From::from
///
/// ```
/// let hello = String::from("Hello, world!");
@ -87,10 +83,8 @@ use crate::vec::Vec;
/// hello.push_str("orld!");
/// ```
///
/// [`String::from`]: #method.from
/// [`char`]: ../../std/primitive.char.html
/// [`push`]: #method.push
/// [`push_str`]: #method.push_str
/// [`push`]: String::push
/// [`push_str`]: String::push_str
///
/// If you have a vector of UTF-8 bytes, you can create a `String` from it with
/// the [`from_utf8`] method:
@ -105,7 +99,7 @@ use crate::vec::Vec;
/// assert_eq!("💖", sparkle_heart);
/// ```
///
/// [`from_utf8`]: #method.from_utf8
/// [`from_utf8`]: String::from_utf8
///
/// # UTF-8
///
@ -128,8 +122,8 @@ use crate::vec::Vec;
/// The [`bytes`] and [`chars`] methods return iterators over the first
/// two, respectively.
///
/// [`bytes`]: #method.bytes
/// [`chars`]: #method.chars
/// [`bytes`]: str::bytes
/// [`chars`]: str::chars
///
/// # Deref
///
@ -215,9 +209,9 @@ use crate::vec::Vec;
/// assert_eq!(String::from("Once upon a time..."), s);
/// ```
///
/// [`as_ptr`]: #method.as_ptr
/// [`len`]: #method.len
/// [`capacity`]: #method.capacity
/// [`as_ptr`]: str::as_ptr
/// [`len`]: String::len
/// [`capacity`]: String::capacity
///
/// If a `String` has enough capacity, adding elements to it will not
/// re-allocate. For example, consider this program:
@ -259,7 +253,7 @@ use crate::vec::Vec;
/// }
/// ```
///
/// [`with_capacity`]: #method.with_capacity
/// [`with_capacity`]: String::with_capacity
///
/// We end up with a different output:
///
@ -274,9 +268,10 @@ use crate::vec::Vec;
///
/// Here, there's no need to allocate more memory inside the loop.
///
/// [`&str`]: ../../std/primitive.str.html
/// [`Deref`]: ../../std/ops/trait.Deref.html
/// [`as_str()`]: struct.String.html#method.as_str
/// [`str`]: prim@str
/// [`&str`]: prim@str
/// [`Deref`]: core::ops::Deref
/// [`as_str()`]: String::as_str
#[derive(PartialOrd, Eq, Ord)]
#[cfg_attr(not(test), rustc_diagnostic_item = "string_type")]
#[stable(feature = "rust1", since = "1.0.0")]
@ -291,20 +286,18 @@ pub struct String {
/// [`into_bytes`] method will give back the byte vector that was used in the
/// conversion attempt.
///
/// [`from_utf8`]: struct.String.html#method.from_utf8
/// [`String`]: struct.String.html
/// [`into_bytes`]: struct.FromUtf8Error.html#method.into_bytes
/// [`from_utf8`]: String::from_utf8
/// [`into_bytes`]: FromUtf8Error::into_bytes
///
/// The [`Utf8Error`] type provided by [`std::str`] represents an error that may
/// occur when converting a slice of [`u8`]s to a [`&str`]. In this sense, it's
/// an analogue to `FromUtf8Error`, and you can get one from a `FromUtf8Error`
/// through the [`utf8_error`] method.
///
/// [`Utf8Error`]: ../../std/str/struct.Utf8Error.html
/// [`std::str`]: ../../std/str/index.html
/// [`u8`]: ../../std/primitive.u8.html
/// [`&str`]: ../../std/primitive.str.html
/// [`utf8_error`]: #method.utf8_error
/// [`Utf8Error`]: core::str::Utf8Error
/// [`std::str`]: core::str
/// [`&str`]: prim@str
/// [`utf8_error`]: Self::utf8_error
///
/// # Examples
///
@ -330,9 +323,7 @@ pub struct FromUtf8Error {
///
/// This type is the error type for the [`from_utf16`] method on [`String`].
///
/// [`from_utf16`]: struct.String.html#method.from_utf16
/// [`String`]: struct.String.html
///
/// [`from_utf16`]: String::from_utf16
/// # Examples
///
/// Basic usage:
@ -358,7 +349,7 @@ impl String {
/// consider the [`with_capacity`] method to prevent excessive
/// re-allocation.
///
/// [`with_capacity`]: #method.with_capacity
/// [`with_capacity`]: String::with_capacity
///
/// # Examples
///
@ -383,12 +374,12 @@ impl String {
/// appending a bunch of data to the `String`, reducing the number of
/// reallocations it needs to do.
///
/// [`capacity`]: #method.capacity
/// [`capacity`]: String::capacity
///
/// If the given capacity is `0`, no allocation will occur, and this method
/// is identical to the [`new`] method.
///
/// [`new`]: #method.new
/// [`new`]: String::new
///
/// # Examples
///
@ -479,15 +470,10 @@ impl String {
/// See the docs for [`FromUtf8Error`] for more details on what you can do
/// with this error.
///
/// [`from_utf8_unchecked`]: struct.String.html#method.from_utf8_unchecked
/// [`String`]: struct.String.html
/// [`u8`]: ../../std/primitive.u8.html
/// [`Vec<u8>`]: ../../std/vec/struct.Vec.html
/// [`&str`]: ../../std/primitive.str.html
/// [`str::from_utf8`]: ../../std/str/fn.from_utf8.html
/// [`into_bytes`]: struct.String.html#method.into_bytes
/// [`FromUtf8Error`]: struct.FromUtf8Error.html
/// [`Err`]: ../../std/result/enum.Result.html#variant.Err
/// [`from_utf8_unchecked`]: String::from_utf8_unchecked
/// [`Vec<u8>`]: crate::vec::Vec
/// [`&str`]: prim@str
/// [`into_bytes`]: String::into_bytes
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn from_utf8(vec: Vec<u8>) -> Result<String, FromUtf8Error> {
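A sketch of the error path documented above: invalid bytes are handed back intact through `FromUtf8Error::into_bytes`.

```rust
fn main() {
    // Valid UTF-8 converts cleanly...
    let sparkle_heart = vec![240, 159, 146, 150];
    assert_eq!("💖", String::from_utf8(sparkle_heart).unwrap());

    // ...while invalid bytes come back unchanged from the error.
    let invalid = vec![0, 159];
    let err = String::from_utf8(invalid).unwrap_err();
    assert_eq!(err.into_bytes(), vec![0, 159]);
}
```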
@ -506,16 +492,15 @@ impl String {
/// `from_utf8_lossy()` will replace any invalid UTF-8 sequences with
/// [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD], which looks like this: <20>
///
/// [`u8`]: ../../std/primitive.u8.html
/// [byteslice]: ../../std/primitive.slice.html
/// [U+FFFD]: ../char/constant.REPLACEMENT_CHARACTER.html
/// [U+FFFD]: core::char::REPLACEMENT_CHARACTER
///
/// If you are sure that the byte slice is valid UTF-8, and you don't want
/// to incur the overhead of the conversion, there is an unsafe version
/// of this function, [`from_utf8_unchecked`], which has the same behavior
/// but skips the checks.
///
/// [`from_utf8_unchecked`]: struct.String.html#method.from_utf8_unchecked
/// [`from_utf8_unchecked`]: String::from_utf8_unchecked
///
/// This function returns a [`Cow<'a, str>`]. If our byte slice is invalid
/// UTF-8, then we need to insert the replacement characters, which will
@ -523,7 +508,7 @@ impl String {
/// it's already valid UTF-8, we don't need a new allocation. This return
/// type allows us to handle both cases.
///
/// [`Cow<'a, str>`]: ../../std/borrow/enum.Cow.html
/// [`Cow<'a, str>`]: crate::borrow::Cow
///
/// # Examples
///
@ -583,8 +568,6 @@ impl String {
/// Decode a UTF-16 encoded vector `v` into a `String`, returning [`Err`]
/// if `v` contains any invalid data.
///
/// [`Err`]: ../../std/result/enum.Result.html#variant.Err
///
/// # Examples
///
/// Basic usage:
@ -623,9 +606,9 @@ impl String {
/// `from_utf16_lossy` returns a `String` since the UTF-16 to UTF-8
/// conversion requires a memory allocation.
///
/// [`from_utf8_lossy`]: #method.from_utf8_lossy
/// [`Cow<'a, str>`]: ../borrow/enum.Cow.html
/// [U+FFFD]: ../char/constant.REPLACEMENT_CHARACTER.html
/// [`from_utf8_lossy`]: String::from_utf8_lossy
/// [`Cow<'a, str>`]: crate::borrow::Cow
/// [U+FFFD]: core::char::REPLACEMENT_CHARACTER
///
/// # Examples
///
@ -659,7 +642,7 @@ impl String {
/// into a `String` with the [`from_raw_parts`] function, allowing
/// the destructor to perform the cleanup.
///
/// [`from_raw_parts`]: #method.from_raw_parts
/// [`from_raw_parts`]: String::from_raw_parts
///
/// # Examples
///
@ -684,15 +667,16 @@ impl String {
/// This is highly unsafe, due to the number of invariants that aren't
/// checked:
///
/// * The memory at `ptr` needs to have been previously allocated by the
/// * The memory at `buf` needs to have been previously allocated by the
/// same allocator the standard library uses, with a required alignment of exactly 1.
/// * `length` needs to be less than or equal to `capacity`.
/// * `capacity` needs to be the correct value.
/// * The first `length` bytes at `buf` need to be valid UTF-8.
///
/// Violating these may cause problems like corrupting the allocator's
/// internal data structures.
///
/// The ownership of `ptr` is effectively transferred to the
/// The ownership of `buf` is effectively transferred to the
/// `String` which may then deallocate, reallocate or change the
/// contents of memory pointed to by the pointer at will. Ensure
/// that nothing else uses the pointer after calling this
@ -732,7 +716,7 @@ impl String {
///
/// See the safe version, [`from_utf8`], for more details.
///
/// [`from_utf8`]: struct.String.html#method.from_utf8
/// [`from_utf8`]: String::from_utf8
///
/// # Safety
///
@ -867,8 +851,7 @@ impl String {
///
/// Panics if the new capacity overflows [`usize`].
///
/// [`reserve_exact`]: struct.String.html#method.reserve_exact
/// [`usize`]: ../../std/primitive.usize.html
/// [`reserve_exact`]: String::reserve_exact
///
/// # Examples
///
@ -911,7 +894,7 @@ impl String {
/// Consider using the [`reserve`] method unless you absolutely know
/// better than the allocator.
///
/// [`reserve`]: #method.reserve
/// [`reserve`]: String::reserve
///
/// # Panics
///
@ -1076,8 +1059,6 @@ impl String {
/// Appends the given [`char`] to the end of this `String`.
///
/// [`char`]: ../../std/primitive.char.html
///
/// # Examples
///
/// Basic usage:
@ -1104,7 +1085,7 @@ impl String {
///
/// The inverse of this method is [`from_utf8`].
///
/// [`from_utf8`]: #method.from_utf8
/// [`from_utf8`]: String::from_utf8
///
/// # Examples
///
@ -1133,8 +1114,6 @@ impl String {
///
/// Panics if `new_len` does not lie on a [`char`] boundary.
///
/// [`char`]: ../../std/primitive.char.html
///
/// # Examples
///
/// Basic usage:
@ -1159,8 +1138,6 @@ impl String {
///
/// Returns [`None`] if this `String` is empty.
///
/// [`None`]: ../../std/option/enum.Option.html#variant.None
///
/// # Examples
///
/// Basic usage:
@ -1187,7 +1164,7 @@ impl String {
/// Removes a [`char`] from this `String` at a byte position and returns it.
///
/// This is an `O(n)` operation, as it requires copying every element in the
/// This is an *O*(*n*) operation, as it requires copying every element in the
/// buffer.
///
/// # Panics
@ -1195,8 +1172,6 @@ impl String {
/// Panics if `idx` is larger than or equal to the `String`'s length,
/// or if it does not lie on a [`char`] boundary.
///
/// [`char`]: ../../std/primitive.char.html
///
/// # Examples
///
/// Basic usage:
@ -1289,7 +1264,7 @@ impl String {
/// Inserts a character into this `String` at a byte position.
///
/// This is an `O(n)` operation as it requires copying every element in the
/// This is an *O*(*n*) operation as it requires copying every element in the
/// buffer.
///
/// # Panics
@ -1297,8 +1272,6 @@ impl String {
/// Panics if `idx` is larger than the `String`'s length, or if it does not
/// lie on a [`char`] boundary.
///
/// [`char`]: ../../std/primitive.char.html
///
/// # Examples
///
/// Basic usage:
@ -1338,7 +1311,7 @@ impl String {
/// Inserts a string slice into this `String` at a byte position.
///
/// This is an `O(n)` operation as it requires copying every element in the
/// This is an *O*(*n*) operation as it requires copying every element in the
/// buffer.
///
/// # Panics
@ -1346,8 +1319,6 @@ impl String {
/// Panics if `idx` is larger than the `String`'s length, or if it does not
/// lie on a [`char`] boundary.
///
/// [`char`]: ../../std/primitive.char.html
///
/// # Examples
///
/// Basic usage:
@ -1507,8 +1478,6 @@ impl String {
/// Panics if the starting point or end point do not lie on a [`char`]
/// boundary, or if they're out of bounds.
///
/// [`char`]: ../../std/primitive.char.html
///
/// # Examples
///
/// Basic usage:
@ -1567,9 +1536,6 @@ impl String {
/// Panics if the starting point or end point do not lie on a [`char`]
/// boundary, or if they're out of bounds.
///
/// [`char`]: ../../std/primitive.char.html
/// [`Vec::splice`]: ../../std/vec/struct.Vec.html#method.splice
///
/// # Examples
///
/// Basic usage:
@ -1610,8 +1576,7 @@ impl String {
///
/// This will drop any excess capacity.
///
/// [`Box`]: ../../std/boxed/struct.Box.html
/// [`str`]: ../../std/primitive.str.html
/// [`str`]: prim@str
///
/// # Examples
///
@ -1680,10 +1645,8 @@ impl FromUtf8Error {
/// an analogue to `FromUtf8Error`. See its documentation for more details
/// on using it.
///
/// [`Utf8Error`]: ../../std/str/struct.Utf8Error.html
/// [`std::str`]: ../../std/str/index.html
/// [`u8`]: ../../std/primitive.u8.html
/// [`&str`]: ../../std/primitive.str.html
/// [`std::str`]: core::str
/// [`&str`]: prim@str
///
/// # Examples
///
@ -2012,7 +1975,7 @@ impl hash::Hash for String {
///
/// This consumes the `String` on the left-hand side and re-uses its buffer (growing it if
/// necessary). This is done to avoid allocating a new `String` and copying the entire contents on
/// every operation, which would lead to `O(n^2)` running time when building an `n`-byte string by
/// every operation, which would lead to *O*(*n*^2) running time when building an *n*-byte string by
/// repeated concatenation.
///
/// The string on the right-hand side is only borrowed; its contents are copied into the returned
@ -2187,7 +2150,7 @@ impl ops::DerefMut for String {
///
/// This alias exists for backwards compatibility, and may be eventually deprecated.
///
/// [`Infallible`]: ../../core/convert/enum.Infallible.html
/// [`Infallible`]: core::convert::Infallible
#[stable(feature = "str_parse_error", since = "1.5.0")]
pub type ParseError = core::convert::Infallible;
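Tying back to the `Add` note above: because `+` consumes and reuses the left-hand buffer, building a string by repeated concatenation stays linear overall. A small sketch:

```rust
fn main() {
    let mut s = String::new();
    for i in 0..5 {
        // Each `+` moves `s` in and grows its existing buffer,
        // avoiding the O(n^2) cost of allocating a fresh string per step.
        s = s + &i.to_string();
    }
    assert_eq!(s, "01234");
}
```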
@ -2207,7 +2170,7 @@ impl FromStr for String {
/// [`Display`] should be implemented instead, and you get the `ToString`
/// implementation for free.
///
/// [`Display`]: ../../std/fmt/trait.Display.html
/// [`Display`]: fmt::Display
#[stable(feature = "rust1", since = "1.0.0")]
pub trait ToString {
/// Converts the given value to a `String`.
@ -2235,6 +2198,9 @@ pub trait ToString {
/// since `fmt::Write for String` never returns an error itself.
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: fmt::Display + ?Sized> ToString for T {
// A common guideline is to not inline generic functions. However,
// removing `#[inline]` from this method causes a non-negligible regression.
// See <https://github.com/rust-lang/rust/pull/74852> for the last attempt to remove it.
#[inline]
default fn to_string(&self) -> String {
use fmt::Write;
@ -2465,8 +2431,7 @@ impl fmt::Write for String {
/// This struct is created by the [`drain`] method on [`String`]. See its
/// documentation for more.
///
/// [`drain`]: struct.String.html#method.drain
/// [`String`]: struct.String.html
/// [`drain`]: String::drain
#[stable(feature = "drain", since = "1.6.0")]
pub struct Drain<'a> {
/// Will be used as &'a mut String in the destructor
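A minimal sketch of the `String::drain` call that produces this struct (sample text is illustrative):

```rust
fn main() {
    let mut s = String::from("α is alpha, β is beta");
    let beta_offset = s.find('β').unwrap_or(s.len());

    // Remove everything up to (but not including) 'β';
    // the returned `Drain` iterator yields the removed chars.
    let removed: String = s.drain(..beta_offset).collect();
    assert_eq!(removed, "α is alpha, ");
    assert_eq!(s, "β is beta");
}
```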


@ -2,12 +2,9 @@
//! Thread-safe reference-counting pointers.
//!
//! See the [`Arc<T>`][arc] documentation for more details.
//!
//! [arc]: struct.Arc.html
//! See the [`Arc<T>`][Arc] documentation for more details.
use core::any::Any;
use core::array::LengthAtMost32;
use core::borrow;
use core::cmp::Ordering;
use core::convert::{From, TryFrom};
@ -24,7 +21,7 @@ use core::slice::from_raw_parts_mut;
use core::sync::atomic;
use core::sync::atomic::Ordering::{Acquire, Relaxed, Release, SeqCst};
use crate::alloc::{box_free, handle_alloc_error, AllocInit, AllocRef, Global, Layout};
use crate::alloc::{box_free, handle_alloc_error, AllocErr, AllocRef, Global, Layout};
use crate::borrow::{Cow, ToOwned};
use crate::boxed::Box;
use crate::rc::is_dangling;
@ -101,21 +98,21 @@ macro_rules! acquire {
/// ## Breaking cycles with `Weak`
///
/// The [`downgrade`][downgrade] method can be used to create a non-owning
/// [`Weak`][weak] pointer. A [`Weak`][weak] pointer can be [`upgrade`][upgrade]d
/// [`Weak`] pointer. A [`Weak`] pointer can be [`upgrade`][upgrade]d
/// to an `Arc`, but this will return [`None`] if the value stored in the allocation has
/// already been dropped. In other words, `Weak` pointers do not keep the value
/// inside the allocation alive; however, they *do* keep the allocation
/// (the backing store for the value) alive.
///
/// A cycle between `Arc` pointers will never be deallocated. For this reason,
/// [`Weak`][weak] is used to break cycles. For example, a tree could have
/// strong `Arc` pointers from parent nodes to children, and [`Weak`][weak]
/// [`Weak`] is used to break cycles. For example, a tree could have
/// strong `Arc` pointers from parent nodes to children, and [`Weak`]
/// pointers from children back to their parents.
///
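As a hedged sketch of the tree shape described above (the type and field names are invented for illustration):

```rust
use std::sync::{Arc, Mutex, Weak};

struct Node {
    parent: Mutex<Weak<Node>>,       // weak: a child does not keep its parent alive
    children: Mutex<Vec<Arc<Node>>>, // strong: a parent owns its children
}

fn main() {
    let root = Arc::new(Node {
        parent: Mutex::new(Weak::new()),
        children: Mutex::new(Vec::new()),
    });
    let child = Arc::new(Node {
        parent: Mutex::new(Arc::downgrade(&root)),
        children: Mutex::new(Vec::new()),
    });
    root.children.lock().unwrap().push(Arc::clone(&child));

    // The back-pointer upgrades while `root` is alive...
    assert!(child.parent.lock().unwrap().upgrade().is_some());
    // ...and no strong cycle exists, so dropping both handles frees everything.
}
```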
/// # Cloning references
///
/// Creating a new reference from an existing reference counted pointer is done using the
/// `Clone` trait implemented for [`Arc<T>`][arc] and [`Weak<T>`][weak].
/// `Clone` trait implemented for [`Arc<T>`][Arc] and [`Weak<T>`][Weak].
///
/// ```
/// use std::sync::Arc;
@ -140,23 +137,20 @@ macro_rules! acquire {
/// Arc::downgrade(&my_arc);
/// ```
///
/// [`Weak<T>`][weak] does not auto-dereference to `T`, because the inner value may have
/// [`Weak<T>`][Weak] does not auto-dereference to `T`, because the inner value may have
/// already been dropped.
///
/// [arc]: struct.Arc.html
/// [weak]: struct.Weak.html
/// [`Rc<T>`]: ../../std/rc/struct.Rc.html
/// [clone]: ../../std/clone/trait.Clone.html#tymethod.clone
/// [`Rc<T>`]: crate::rc::Rc
/// [clone]: Clone::clone
/// [mutex]: ../../std/sync/struct.Mutex.html
/// [rwlock]: ../../std/sync/struct.RwLock.html
/// [atomic]: ../../std/sync/atomic/index.html
/// [`Send`]: ../../std/marker/trait.Send.html
/// [`Sync`]: ../../std/marker/trait.Sync.html
/// [deref]: ../../std/ops/trait.Deref.html
/// [downgrade]: struct.Arc.html#method.downgrade
/// [upgrade]: struct.Weak.html#method.upgrade
/// [`None`]: ../../std/option/enum.Option.html#variant.None
/// [`RefCell<T>`]: ../../std/cell/struct.RefCell.html
/// [atomic]: core::sync::atomic
/// [`Send`]: core::marker::Send
/// [`Sync`]: core::marker::Sync
/// [deref]: core::ops::Deref
/// [downgrade]: Arc::downgrade
/// [upgrade]: Weak::upgrade
/// [`RefCell<T>`]: core::cell::RefCell
/// [`std::sync`]: ../../std/sync/index.html
/// [`Arc::clone(&from)`]: #method.clone
///
@ -185,7 +179,7 @@ macro_rules! acquire {
///
/// Sharing a mutable [`AtomicUsize`]:
///
/// [`AtomicUsize`]: ../../std/sync/atomic/struct.AtomicUsize.html
/// [`AtomicUsize`]: core::sync::atomic::AtomicUsize
///
/// ```no_run
/// use std::sync::Arc;
@ -255,11 +249,7 @@ impl<T: ?Sized> Arc<T> {
///
/// The typical way to obtain a `Weak` pointer is to call [`Arc::downgrade`].
///
/// [`Arc`]: struct.Arc.html
/// [`Arc::downgrade`]: struct.Arc.html#method.downgrade
/// [`upgrade`]: struct.Weak.html#method.upgrade
/// [`Option`]: ../../std/option/enum.Option.html
/// [`None`]: ../../std/option/enum.Option.html#variant.None
/// [`upgrade`]: Weak::upgrade
#[stable(feature = "arc_weak", since = "1.4.0")]
pub struct Weak<T: ?Sized> {
// This is a `NonNull` to allow optimizing the size of this type in enums,
@ -329,6 +319,79 @@ impl<T> Arc<T> {
Self::from_inner(Box::leak(x).into())
}
/// Constructs a new `Arc<T>` using a weak reference to itself. Attempting
/// to upgrade the weak reference before this function returns will result
/// in a `None` value. However, the weak reference may be cloned freely and
/// stored for use at a later time.
///
/// # Examples
/// ```
/// #![feature(arc_new_cyclic)]
/// #![allow(dead_code)]
///
/// use std::sync::{Arc, Weak};
///
/// struct Foo {
/// me: Weak<Foo>,
/// }
///
/// let foo = Arc::new_cyclic(|me| Foo {
/// me: me.clone(),
/// });
/// ```
#[inline]
#[unstable(feature = "arc_new_cyclic", issue = "75861")]
pub fn new_cyclic(data_fn: impl FnOnce(&Weak<T>) -> T) -> Arc<T> {
// Construct the inner in the "uninitialized" state with a single
// weak reference.
let uninit_ptr: NonNull<_> = Box::leak(box ArcInner {
strong: atomic::AtomicUsize::new(0),
weak: atomic::AtomicUsize::new(1),
data: mem::MaybeUninit::<T>::uninit(),
})
.into();
let init_ptr: NonNull<ArcInner<T>> = uninit_ptr.cast();
let weak = Weak { ptr: init_ptr };
// It's important we don't give up ownership of the weak pointer, or
// else the memory might be freed by the time `data_fn` returns. If
// we really wanted to pass ownership, we could create an additional
// weak pointer for ourselves, but this would result in additional
// updates to the weak reference count which might not be necessary
// otherwise.
let data = data_fn(&weak);
// Now we can properly initialize the inner value and turn our weak
// reference into a strong reference.
unsafe {
let inner = init_ptr.as_ptr();
ptr::write(&raw mut (*inner).data, data);
// The above write to the data field must be visible to any threads which
// observe a non-zero strong count. Therefore we need at least "Release" ordering
// in order to synchronize with the `compare_exchange_weak` in `Weak::upgrade`.
//
// "Acquire" ordering is not required. When considering the possible behaviours
// of `data_fn` we only need to look at what it could do with a reference to a
// non-upgradeable `Weak`:
// - It can *clone* the `Weak`, increasing the weak reference count.
// - It can drop those clones, decreasing the weak reference count (but never to zero).
//
// These side effects do not impact us in any way, and no other side effects are
// possible with safe code alone.
let prev_value = (*inner).strong.fetch_add(1, Release);
debug_assert_eq!(prev_value, 0, "No prior strong references should exist");
}
let strong = Arc::from_inner(init_ptr);
// Strong references should collectively own a shared weak reference,
// so don't run the destructor for our old weak reference.
mem::forget(weak);
strong
}
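A hedged usage sketch for the constructor above (struct and field names invented): upgrading inside the closure fails, exactly as the Release store here and the Acquire in `Weak::upgrade` are arranged to guarantee, while the stored `Weak` works normally afterwards.

```rust
#![feature(arc_new_cyclic)] // unstable in this snapshot

use std::sync::{Arc, Weak};

struct Gadget {
    me: Weak<Gadget>,
}

fn main() {
    let gadget = Arc::new_cyclic(|me| {
        // The strong count is still 0 here, so upgrading fails...
        assert!(me.upgrade().is_none());
        // ...but the handle can be cloned and stored for later.
        Gadget { me: me.clone() }
    });
    // After construction the stored weak reference upgrades normally.
    let again = gadget.me.upgrade().unwrap();
    assert!(Arc::ptr_eq(&again, &gadget));
}
```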
/// Constructs a new `Arc` with uninitialized contents.
///
/// # Examples
@ -353,9 +416,11 @@ impl<T> Arc<T> {
#[unstable(feature = "new_uninit", issue = "63291")]
pub fn new_uninit() -> Arc<mem::MaybeUninit<T>> {
unsafe {
Arc::from_ptr(Arc::allocate_for_layout(Layout::new::<T>(), |mem| {
mem as *mut ArcInner<mem::MaybeUninit<T>>
}))
Arc::from_ptr(Arc::allocate_for_layout(
Layout::new::<T>(),
|layout| Global.alloc(layout),
|mem| mem as *mut ArcInner<mem::MaybeUninit<T>>,
))
}
}
@ -382,9 +447,11 @@ impl<T> Arc<T> {
#[unstable(feature = "new_uninit", issue = "63291")]
pub fn new_zeroed() -> Arc<mem::MaybeUninit<T>> {
unsafe {
let mut uninit = Self::new_uninit();
ptr::write_bytes::<T>(Arc::get_mut_unchecked(&mut uninit).as_mut_ptr(), 0, 1);
uninit
Arc::from_ptr(Arc::allocate_for_layout(
Layout::new::<T>(),
|layout| Global.alloc_zeroed(layout),
|mem| mem as *mut ArcInner<mem::MaybeUninit<T>>,
))
}
}
@ -397,13 +464,11 @@ impl<T> Arc<T> {
/// Returns the inner value, if the `Arc` has exactly one strong reference.
///
/// Otherwise, an [`Err`][result] is returned with the same `Arc` that was
/// Otherwise, an [`Err`] is returned with the same `Arc` that was
/// passed in.
///
/// This will succeed even if there are outstanding weak references.
///
/// [result]: ../../std/result/enum.Result.html
///
/// # Examples
///
/// ```
@ -438,7 +503,7 @@ impl<T> Arc<T> {
}
impl<T> Arc<[T]> {
/// Constructs a new reference-counted slice with uninitialized contents.
/// Constructs a new atomically reference-counted slice with uninitialized contents.
///
/// # Examples
///
@ -465,6 +530,40 @@ impl<T> Arc<[T]> {
pub fn new_uninit_slice(len: usize) -> Arc<[mem::MaybeUninit<T>]> {
unsafe { Arc::from_ptr(Arc::allocate_for_slice(len)) }
}
/// Constructs a new atomically reference-counted slice with uninitialized contents, with the memory being
/// filled with `0` bytes.
///
/// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and
/// incorrect usage of this method.
///
/// # Examples
///
/// ```
/// #![feature(new_uninit)]
///
/// use std::sync::Arc;
///
/// let values = Arc::<[u32]>::new_zeroed_slice(3);
/// let values = unsafe { values.assume_init() };
///
/// assert_eq!(*values, [0, 0, 0])
/// ```
///
/// [zeroed]: ../../std/mem/union.MaybeUninit.html#method.zeroed
#[unstable(feature = "new_uninit", issue = "63291")]
pub fn new_zeroed_slice(len: usize) -> Arc<[mem::MaybeUninit<T>]> {
unsafe {
Arc::from_ptr(Arc::allocate_for_layout(
Layout::array::<T>(len).unwrap(),
|layout| Global.alloc_zeroed(layout),
|mem| {
ptr::slice_from_raw_parts_mut(mem as *mut T, len)
as *mut ArcInner<[mem::MaybeUninit<T>]>
},
))
}
}
}
impl<T> Arc<mem::MaybeUninit<T>> {
@ -551,9 +650,7 @@ impl<T: ?Sized> Arc<T> {
/// Consumes the `Arc`, returning the wrapped pointer.
///
/// To avoid a memory leak the pointer must be converted back to an `Arc` using
/// [`Arc::from_raw`][from_raw].
///
/// [from_raw]: struct.Arc.html#method.from_raw
/// [`Arc::from_raw`].
///
/// # Examples
///
@ -573,7 +670,7 @@ impl<T: ?Sized> Arc<T> {
/// Provides a raw pointer to the data.
///
/// The counts are not affected in way and the `Arc` is not consumed. The pointer is valid for
/// The counts are not affected in any way and the `Arc` is not consumed. The pointer is valid for
/// as long as there are strong counts in the `Arc`.
///
/// # Examples
@ -613,8 +710,8 @@ impl<T: ?Sized> Arc<T> {
/// This function is unsafe because improper use may lead to memory unsafety,
/// even if the returned `Arc<T>` is never accessed.
///
/// [into_raw]: struct.Arc.html#method.into_raw
/// [transmute]: ../../std/mem/fn.transmute.html
/// [into_raw]: Arc::into_raw
/// [transmute]: core::mem::transmute
///
/// # Examples
///
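An editorial round-trip sketch for `Arc::into_raw`/`Arc::from_raw` (not the doctest elided from the diff):

```rust
use std::sync::Arc;

fn main() {
    let x = Arc::new("hello".to_owned());
    // `into_raw` leaks the strong reference as a raw pointer...
    let ptr: *const String = Arc::into_raw(x);
    unsafe {
        assert_eq!(*ptr, "hello");
        // ...and `from_raw` reclaims ownership of that same reference,
        // so the allocation is released normally on drop.
        let x = Arc::from_raw(ptr);
        assert_eq!(Arc::strong_count(&x), 1);
    }
}
```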
@ -647,32 +744,7 @@ impl<T: ?Sized> Arc<T> {
}
}
/// Consumes the `Arc`, returning the wrapped pointer as `NonNull<T>`.
///
/// # Examples
///
/// ```
/// #![feature(rc_into_raw_non_null)]
/// #![allow(deprecated)]
///
/// use std::sync::Arc;
///
/// let x = Arc::new("hello".to_owned());
/// let ptr = Arc::into_raw_non_null(x);
/// let deref = unsafe { ptr.as_ref() };
/// assert_eq!(deref, "hello");
/// ```
#[unstable(feature = "rc_into_raw_non_null", issue = "47336")]
#[rustc_deprecated(since = "1.44.0", reason = "use `Arc::into_raw` instead")]
#[inline]
pub fn into_raw_non_null(this: Self) -> NonNull<T> {
// safe because Arc guarantees its pointer is non-null
unsafe { NonNull::new_unchecked(Arc::into_raw(this) as *mut _) }
}
/// Creates a new [`Weak`][weak] pointer to this allocation.
///
/// [weak]: struct.Weak.html
/// Creates a new [`Weak`] pointer to this allocation.
///
/// # Examples
///
@ -714,9 +786,7 @@ impl<T: ?Sized> Arc<T> {
}
}
/// Gets the number of [`Weak`][weak] pointers to this allocation.
///
/// [weak]: struct.Weak.html
/// Gets the number of [`Weak`] pointers to this allocation.
///
/// # Safety
///
@ -885,7 +955,7 @@ impl<T: ?Sized> Arc<T> {
/// assert!(!Arc::ptr_eq(&five, &other_five));
/// ```
///
/// [`ptr::eq`]: ../../std/ptr/fn.eq.html
/// [`ptr::eq`]: core::ptr::eq
pub fn ptr_eq(this: &Self, other: &Self) -> bool {
this.ptr.as_ptr() == other.ptr.as_ptr()
}
@ -899,6 +969,7 @@ impl<T: ?Sized> Arc<T> {
/// and must return a (potentially fat) pointer for the `ArcInner<T>`.
unsafe fn allocate_for_layout(
value_layout: Layout,
allocate: impl FnOnce(Layout) -> Result<NonNull<[u8]>, AllocErr>,
mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
) -> *mut ArcInner<T> {
// Calculate layout using the given value layout.
@ -907,12 +978,10 @@ impl<T: ?Sized> Arc<T> {
// reference (see #54908).
let layout = Layout::new::<ArcInner<()>>().extend(value_layout).unwrap().0.pad_to_align();
let mem = Global
.alloc(layout, AllocInit::Uninitialized)
.unwrap_or_else(|_| handle_alloc_error(layout));
let ptr = allocate(layout).unwrap_or_else(|_| handle_alloc_error(layout));
// Initialize the ArcInner
let inner = mem_to_arcinner(mem.ptr.as_ptr());
let inner = mem_to_arcinner(ptr.as_non_null_ptr().as_ptr());
debug_assert_eq!(unsafe { Layout::for_value(&*inner) }, layout);
unsafe {
@ -927,9 +996,11 @@ impl<T: ?Sized> Arc<T> {
unsafe fn allocate_for_ptr(ptr: *const T) -> *mut ArcInner<T> {
// Allocate for the `ArcInner<T>` using the given value.
unsafe {
Self::allocate_for_layout(Layout::for_value(&*ptr), |mem| {
set_data_ptr(ptr as *mut T, mem) as *mut ArcInner<T>
})
Self::allocate_for_layout(
Layout::for_value(&*ptr),
|layout| Global.alloc(layout),
|mem| set_data_ptr(ptr as *mut T, mem) as *mut ArcInner<T>,
)
}
}
@ -960,9 +1031,11 @@ impl<T> Arc<[T]> {
/// Allocates an `ArcInner<[T]>` with the given length.
unsafe fn allocate_for_slice(len: usize) -> *mut ArcInner<[T]> {
unsafe {
Self::allocate_for_layout(Layout::array::<T>(len).unwrap(), |mem| {
ptr::slice_from_raw_parts_mut(mem as *mut T, len) as *mut ArcInner<[T]>
})
Self::allocate_for_layout(
Layout::array::<T>(len).unwrap(),
|layout| Global.alloc(layout),
|mem| ptr::slice_from_raw_parts_mut(mem as *mut T, len) as *mut ArcInner<[T]>,
)
}
}
}
@ -1012,7 +1085,7 @@ impl<T> Arc<[T]> {
let slice = from_raw_parts_mut(self.elems, self.n_elems);
ptr::drop_in_place(slice);
Global.dealloc(self.mem.cast(), self.layout);
Global.dealloc(self.mem, self.layout);
}
}
}
@ -1124,7 +1197,7 @@ impl<T: ?Sized> Receiver for Arc<T> {}
impl<T: Clone> Arc<T> {
/// Makes a mutable reference into the given `Arc`.
///
/// If there are other `Arc` or [`Weak`][weak] pointers to the same allocation,
/// If there are other `Arc` or [`Weak`] pointers to the same allocation,
/// then `make_mut` will create a new allocation and invoke [`clone`][clone] on the inner value
/// to ensure unique ownership. This is also referred to as clone-on-write.
///
@ -1133,10 +1206,9 @@ impl<T: Clone> Arc<T> {
///
/// See also [`get_mut`][get_mut], which will fail rather than cloning.
///
/// [weak]: struct.Weak.html
/// [clone]: ../../std/clone/trait.Clone.html#tymethod.clone
/// [get_mut]: struct.Arc.html#method.get_mut
/// [`Rc::make_mut`]: ../rc/struct.Rc.html#method.make_mut
/// [clone]: Clone::clone
/// [get_mut]: Arc::get_mut
/// [`Rc::make_mut`]: super::rc::Rc::make_mut
///
/// # Examples
///
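A short sketch of the clone-on-write behavior described above (values are illustrative):

```rust
use std::sync::Arc;

fn main() {
    let mut data = Arc::new(5);
    *Arc::make_mut(&mut data) += 1; // no clone: `data` is unique

    let other = Arc::clone(&data);
    *Arc::make_mut(&mut data) += 1; // clones the value, since `other` exists

    // The two handles now point at different allocations.
    assert_eq!(*data, 7);
    assert_eq!(*other, 6);
    assert!(!Arc::ptr_eq(&data, &other));
}
```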
@ -1210,18 +1282,16 @@ impl<T: Clone> Arc<T> {
impl<T: ?Sized> Arc<T> {
/// Returns a mutable reference into the given `Arc`, if there are
/// no other `Arc` or [`Weak`][weak] pointers to the same allocation.
/// no other `Arc` or [`Weak`] pointers to the same allocation.
///
/// Returns [`None`][option] otherwise, because it is not safe to
/// Returns [`None`] otherwise, because it is not safe to
/// mutate a shared value.
///
/// See also [`make_mut`][make_mut], which will [`clone`][clone]
/// the inner value when there are other pointers.
///
/// [weak]: struct.Weak.html
/// [option]: ../../std/option/enum.Option.html
/// [make_mut]: struct.Arc.html#method.make_mut
/// [clone]: ../../std/clone/trait.Clone.html#tymethod.clone
/// [make_mut]: Arc::make_mut
/// [clone]: Clone::clone
///
/// # Examples
///
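For contrast with `make_mut`, a minimal sketch of `get_mut` refusing shared access (values are illustrative):

```rust
use std::sync::Arc;

fn main() {
    let mut x = Arc::new(3);
    *Arc::get_mut(&mut x).unwrap() = 4; // unique, so this succeeds
    assert_eq!(*x, 4);

    let _y = Arc::clone(&x);
    // A second strong handle exists, so mutation is refused.
    assert!(Arc::get_mut(&mut x).is_none());
}
```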
@ -1255,7 +1325,7 @@ impl<T: ?Sized> Arc<T> {
///
/// See also [`get_mut`], which is safe and does appropriate checks.
///
/// [`get_mut`]: struct.Arc.html#method.get_mut
/// [`get_mut`]: Arc::get_mut
///
/// # Safety
///
@ -1341,8 +1411,6 @@ unsafe impl<#[may_dangle] T: ?Sized> Drop for Arc<T> {
/// drop(foo); // Doesn't print anything
/// drop(foo2); // Prints "dropped!"
/// ```
///
/// [`Weak`]: ../../std/sync/struct.Weak.html
#[inline]
fn drop(&mut self) {
// Because `fetch_sub` is already atomic, we do not need to synchronize
@ -1427,8 +1495,7 @@ impl<T> Weak<T> {
/// Constructs a new `Weak<T>`, without allocating any memory.
/// Calling [`upgrade`] on the return value always gives [`None`].
///
/// [`upgrade`]: struct.Weak.html#method.upgrade
/// [`None`]: ../../std/option/enum.Option.html#variant.None
/// [`upgrade`]: Weak::upgrade
///
/// # Examples
///
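A one-line demonstration of the property stated above, as an editorial sketch:

```rust
use std::sync::Weak;

fn main() {
    // A `Weak` created with `new` points at no allocation at all,
    // so upgrading always yields `None`.
    let empty: Weak<i64> = Weak::new();
    assert!(empty.upgrade().is_none());
}
```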
@ -1467,7 +1534,7 @@ impl<T> Weak<T> {
/// // assert_eq!("hello", unsafe { &*weak.as_ptr() });
/// ```
///
/// [`null`]: ../../std/ptr/fn.null.html
/// [`null`]: core::ptr::null
#[stable(feature = "weak_into_raw", since = "1.45.0")]
pub fn as_ptr(&self) -> *const T {
let ptr: *mut ArcInner<T> = NonNull::as_ptr(self.ptr);
@ -1486,8 +1553,9 @@ impl<T> Weak<T> {
/// Consumes the `Weak<T>` and turns it into a raw pointer.
///
/// This converts the weak pointer into a raw pointer, preserving the original weak count. It
/// can be turned back into the `Weak<T>` with [`from_raw`].
/// This converts the weak pointer into a raw pointer, while still preserving the ownership of
/// one weak reference (the weak count is not modified by this operation). It can be turned
/// back into the `Weak<T>` with [`from_raw`].
///
/// The same restrictions of accessing the target of the pointer as with
/// [`as_ptr`] apply.
@ -1508,8 +1576,8 @@ impl<T> Weak<T> {
/// assert_eq!(0, Arc::weak_count(&strong));
/// ```
///
/// [`from_raw`]: struct.Weak.html#method.from_raw
/// [`as_ptr`]: struct.Weak.html#method.as_ptr
/// [`from_raw`]: Weak::from_raw
/// [`as_ptr`]: Weak::as_ptr
#[stable(feature = "weak_into_raw", since = "1.45.0")]
pub fn into_raw(self) -> *const T {
let result = self.as_ptr();
@ -1517,24 +1585,23 @@ impl<T> Weak<T> {
result
}
/// Converts a raw pointer previously created by [`into_raw`] back into
/// `Weak<T>`.
/// Converts a raw pointer previously created by [`into_raw`] back into `Weak<T>`.
///
/// This can be used to safely get a strong reference (by calling [`upgrade`]
/// later) or to deallocate the weak count by dropping the `Weak<T>`.
///
/// It takes ownership of one weak count (with the exception of pointers created by [`new`],
/// as these don't have any corresponding weak count).
/// It takes ownership of one weak reference (with the exception of pointers created by [`new`],
/// as these don't own anything; the method still works on them).
///
/// # Safety
///
/// The pointer must have originated from the [`into_raw`] and must still own its potential
/// weak reference count.
///
/// It is allowed for the strong count to be 0 at the time of calling this, but the weak count
/// must be non-zero or the pointer must have originated from a dangling `Weak<T>` (one created
/// by [`new`]).
/// weak reference.
///
/// It is allowed for the strong count to be 0 at the time of calling this. Nevertheless, this
/// takes ownership of one weak reference currently represented as a raw pointer (the weak
/// count is not modified by this operation) and therefore it must be paired with a previous
/// call to [`into_raw`].
///
/// # Examples
///
/// ```
@ -1556,12 +1623,10 @@ impl<T> Weak<T> {
/// assert!(unsafe { Weak::from_raw(raw_2) }.upgrade().is_none());
/// ```
///
/// [`new`]: struct.Weak.html#method.new
/// [`into_raw`]: struct.Weak.html#method.into_raw
/// [`upgrade`]: struct.Weak.html#method.upgrade
/// [`Weak`]: struct.Weak.html
/// [`Arc`]: struct.Arc.html
/// [`forget`]: ../../std/mem/fn.forget.html
/// [`new`]: Weak::new
/// [`into_raw`]: Weak::into_raw
/// [`upgrade`]: Weak::upgrade
/// [`forget`]: std::mem::forget
#[stable(feature = "weak_into_raw", since = "1.45.0")]
pub unsafe fn from_raw(ptr: *const T) -> Self {
if ptr.is_null() {
@ -1591,9 +1656,6 @@ impl<T: ?Sized> Weak<T> {
///
/// Returns [`None`] if the inner value has since been dropped.
///
/// [`Arc`]: struct.Arc.html
/// [`None`]: ../../std/option/enum.Option.html#variant.None
///
/// # Examples
///
/// ```
@ -1615,7 +1677,8 @@ impl<T: ?Sized> Weak<T> {
#[stable(feature = "arc_weak", since = "1.4.0")]
pub fn upgrade(&self) -> Option<Arc<T>> {
// We use a CAS loop to increment the strong count instead of a
// fetch_add because once the count hits 0 it must never be above 0.
// fetch_add as this function should never take the reference count
// from zero to one.
let inner = self.inner()?;
// Relaxed load because any write of 0 that we can observe
@ -1634,8 +1697,11 @@ impl<T: ?Sized> Weak<T> {
abort();
}
// Relaxed is valid for the same reason it is on Arc's Clone impl
match inner.strong.compare_exchange_weak(n, n + 1, Relaxed, Relaxed) {
// Relaxed is fine for the failure case because we don't have any expectations about the new state.
// Acquire is necessary for the success case to synchronise with `Arc::new_cyclic`, when the inner
// value can be initialized after `Weak` references have already been created. In that case, we
// expect to observe the fully initialized value.
match inner.strong.compare_exchange_weak(n, n + 1, Acquire, Relaxed) {
Ok(_) => return Some(Arc::from_inner(self.ptr)), // null checked above
Err(old) => n = old,
}
@ -1645,8 +1711,6 @@ impl<T: ?Sized> Weak<T> {
/// Gets the number of strong (`Arc`) pointers pointing to this allocation.
///
/// If `self` was created using [`Weak::new`], this will return 0.
///
/// [`Weak::new`]: #method.new
#[stable(feature = "weak_counts", since = "1.41.0")]
pub fn strong_count(&self) -> usize {
if let Some(inner) = self.inner() { inner.strong.load(SeqCst) } else { 0 }
@ -1663,8 +1727,6 @@ impl<T: ?Sized> Weak<T> {
/// Due to implementation details, the returned value can be off by 1 in
/// either direction when other threads are manipulating any `Arc`s or
/// `Weak`s pointing to the same allocation.
///
/// [`Weak::new`]: #method.new
#[stable(feature = "weak_counts", since = "1.41.0")]
pub fn weak_count(&self) -> usize {
self.inner()
@ -1742,7 +1804,7 @@ impl<T: ?Sized> Weak<T> {
/// assert!(!first.ptr_eq(&third));
/// ```
///
/// [`ptr::eq`]: ../../std/ptr/fn.eq.html
/// [`ptr::eq`]: core::ptr::eq
#[inline]
#[stable(feature = "weak_ptr_eq", since = "1.39.0")]
pub fn ptr_eq(&self, other: &Self) -> bool {
@ -1791,8 +1853,7 @@ impl<T> Default for Weak<T> {
/// Calling [`upgrade`] on the return value always
/// gives [`None`].
///
/// [`None`]: ../../std/option/enum.Option.html#variant.None
/// [`upgrade`]: ../../std/sync/struct.Weak.html#method.upgrade
/// [`upgrade`]: Weak::upgrade
///
/// # Examples
///
@ -2162,10 +2223,7 @@ where
}
#[stable(feature = "boxed_slice_try_from", since = "1.43.0")]
impl<T, const N: usize> TryFrom<Arc<[T]>> for Arc<[T; N]>
where
[T; N]: LengthAtMost32,
{
impl<T, const N: usize> TryFrom<Arc<[T]>> for Arc<[T; N]> {
type Error = Arc<[T]>;
fn try_from(boxed_slice: Arc<[T]>) -> Result<Self, Self::Error> {
@ -2282,7 +2340,7 @@ impl<T: ?Sized> Unpin for Arc<T> {}
///
/// - This function is safe for any argument if `T` is sized, and
/// - if `T` is unsized, the pointer must have appropriate pointer metadata
/// aquired from the real instance that you are getting this offset for.
/// acquired from the real instance that you are getting this offset for.
unsafe fn data_offset<T: ?Sized>(ptr: *const T) -> isize {
// Align the unsized value to the end of the `ArcInner`.
// Because it is `?Sized`, it will always be the last field in memory.


@ -0,0 +1,561 @@
use super::*;
use std::boxed::Box;
use std::clone::Clone;
use std::convert::{From, TryInto};
use std::mem::drop;
use std::ops::Drop;
use std::option::Option::{self, None, Some};
use std::sync::atomic::{
self,
Ordering::{Acquire, SeqCst},
};
use std::sync::mpsc::channel;
use std::sync::Mutex;
use std::thread;
use crate::vec::Vec;
struct Canary(*mut atomic::AtomicUsize);
impl Drop for Canary {
fn drop(&mut self) {
unsafe {
match *self {
Canary(c) => {
(*c).fetch_add(1, SeqCst);
}
}
}
}
}
#[test]
#[cfg_attr(target_os = "emscripten", ignore)]
fn manually_share_arc() {
let v = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
let arc_v = Arc::new(v);
let (tx, rx) = channel();
let _t = thread::spawn(move || {
let arc_v: Arc<Vec<i32>> = rx.recv().unwrap();
assert_eq!((*arc_v)[3], 4);
});
tx.send(arc_v.clone()).unwrap();
assert_eq!((*arc_v)[2], 3);
assert_eq!((*arc_v)[4], 5);
}
#[test]
fn test_arc_get_mut() {
let mut x = Arc::new(3);
*Arc::get_mut(&mut x).unwrap() = 4;
assert_eq!(*x, 4);
let y = x.clone();
assert!(Arc::get_mut(&mut x).is_none());
drop(y);
assert!(Arc::get_mut(&mut x).is_some());
let _w = Arc::downgrade(&x);
assert!(Arc::get_mut(&mut x).is_none());
}
#[test]
fn weak_counts() {
assert_eq!(Weak::weak_count(&Weak::<u64>::new()), 0);
assert_eq!(Weak::strong_count(&Weak::<u64>::new()), 0);
let a = Arc::new(0);
let w = Arc::downgrade(&a);
assert_eq!(Weak::strong_count(&w), 1);
assert_eq!(Weak::weak_count(&w), 1);
let w2 = w.clone();
assert_eq!(Weak::strong_count(&w), 1);
assert_eq!(Weak::weak_count(&w), 2);
assert_eq!(Weak::strong_count(&w2), 1);
assert_eq!(Weak::weak_count(&w2), 2);
drop(w);
assert_eq!(Weak::strong_count(&w2), 1);
assert_eq!(Weak::weak_count(&w2), 1);
let a2 = a.clone();
assert_eq!(Weak::strong_count(&w2), 2);
assert_eq!(Weak::weak_count(&w2), 1);
drop(a2);
drop(a);
assert_eq!(Weak::strong_count(&w2), 0);
assert_eq!(Weak::weak_count(&w2), 0);
drop(w2);
}
#[test]
fn try_unwrap() {
let x = Arc::new(3);
assert_eq!(Arc::try_unwrap(x), Ok(3));
let x = Arc::new(4);
let _y = x.clone();
assert_eq!(Arc::try_unwrap(x), Err(Arc::new(4)));
let x = Arc::new(5);
let _w = Arc::downgrade(&x);
assert_eq!(Arc::try_unwrap(x), Ok(5));
}
#[test]
fn into_from_raw() {
let x = Arc::new(box "hello");
let y = x.clone();
let x_ptr = Arc::into_raw(x);
drop(y);
unsafe {
assert_eq!(**x_ptr, "hello");
let x = Arc::from_raw(x_ptr);
assert_eq!(**x, "hello");
assert_eq!(Arc::try_unwrap(x).map(|x| *x), Ok("hello"));
}
}
#[test]
fn test_into_from_raw_unsized() {
use std::fmt::Display;
use std::string::ToString;
let arc: Arc<str> = Arc::from("foo");
let ptr = Arc::into_raw(arc.clone());
let arc2 = unsafe { Arc::from_raw(ptr) };
assert_eq!(unsafe { &*ptr }, "foo");
assert_eq!(arc, arc2);
let arc: Arc<dyn Display> = Arc::new(123);
let ptr = Arc::into_raw(arc.clone());
let arc2 = unsafe { Arc::from_raw(ptr) };
assert_eq!(unsafe { &*ptr }.to_string(), "123");
assert_eq!(arc2.to_string(), "123");
}
#[test]
fn test_cowarc_clone_make_mut() {
let mut cow0 = Arc::new(75);
let mut cow1 = cow0.clone();
let mut cow2 = cow1.clone();
assert!(75 == *Arc::make_mut(&mut cow0));
assert!(75 == *Arc::make_mut(&mut cow1));
assert!(75 == *Arc::make_mut(&mut cow2));
*Arc::make_mut(&mut cow0) += 1;
*Arc::make_mut(&mut cow1) += 2;
*Arc::make_mut(&mut cow2) += 3;
assert!(76 == *cow0);
assert!(77 == *cow1);
assert!(78 == *cow2);
// none should point to the same backing memory
assert!(*cow0 != *cow1);
assert!(*cow0 != *cow2);
assert!(*cow1 != *cow2);
}
#[test]
fn test_cowarc_clone_unique2() {
let mut cow0 = Arc::new(75);
let cow1 = cow0.clone();
let cow2 = cow1.clone();
assert!(75 == *cow0);
assert!(75 == *cow1);
assert!(75 == *cow2);
*Arc::make_mut(&mut cow0) += 1;
assert!(76 == *cow0);
assert!(75 == *cow1);
assert!(75 == *cow2);
// cow1 and cow2 should share the same contents
// cow0 should have a unique reference
assert!(*cow0 != *cow1);
assert!(*cow0 != *cow2);
assert!(*cow1 == *cow2);
}
#[test]
fn test_cowarc_clone_weak() {
let mut cow0 = Arc::new(75);
let cow1_weak = Arc::downgrade(&cow0);
assert!(75 == *cow0);
assert!(75 == *cow1_weak.upgrade().unwrap());
*Arc::make_mut(&mut cow0) += 1;
assert!(76 == *cow0);
assert!(cow1_weak.upgrade().is_none());
}
#[test]
fn test_live() {
let x = Arc::new(5);
let y = Arc::downgrade(&x);
assert!(y.upgrade().is_some());
}
#[test]
fn test_dead() {
let x = Arc::new(5);
let y = Arc::downgrade(&x);
drop(x);
assert!(y.upgrade().is_none());
}
#[test]
fn weak_self_cyclic() {
struct Cycle {
x: Mutex<Option<Weak<Cycle>>>,
}
let a = Arc::new(Cycle { x: Mutex::new(None) });
let b = Arc::downgrade(&a.clone());
*a.x.lock().unwrap() = Some(b);
// hopefully we don't double-free (or leak)...
}
#[test]
fn drop_arc() {
let mut canary = atomic::AtomicUsize::new(0);
let x = Arc::new(Canary(&mut canary as *mut atomic::AtomicUsize));
drop(x);
assert!(canary.load(Acquire) == 1);
}
#[test]
fn drop_arc_weak() {
let mut canary = atomic::AtomicUsize::new(0);
let arc = Arc::new(Canary(&mut canary as *mut atomic::AtomicUsize));
let arc_weak = Arc::downgrade(&arc);
assert!(canary.load(Acquire) == 0);
drop(arc);
assert!(canary.load(Acquire) == 1);
drop(arc_weak);
}
#[test]
fn test_strong_count() {
let a = Arc::new(0);
assert!(Arc::strong_count(&a) == 1);
let w = Arc::downgrade(&a);
assert!(Arc::strong_count(&a) == 1);
let b = w.upgrade().expect("");
assert!(Arc::strong_count(&b) == 2);
assert!(Arc::strong_count(&a) == 2);
drop(w);
drop(a);
assert!(Arc::strong_count(&b) == 1);
let c = b.clone();
assert!(Arc::strong_count(&b) == 2);
assert!(Arc::strong_count(&c) == 2);
}
#[test]
fn test_weak_count() {
let a = Arc::new(0);
assert!(Arc::strong_count(&a) == 1);
assert!(Arc::weak_count(&a) == 0);
let w = Arc::downgrade(&a);
assert!(Arc::strong_count(&a) == 1);
assert!(Arc::weak_count(&a) == 1);
let x = w.clone();
assert!(Arc::weak_count(&a) == 2);
drop(w);
drop(x);
assert!(Arc::strong_count(&a) == 1);
assert!(Arc::weak_count(&a) == 0);
let c = a.clone();
assert!(Arc::strong_count(&a) == 2);
assert!(Arc::weak_count(&a) == 0);
let d = Arc::downgrade(&c);
assert!(Arc::weak_count(&c) == 1);
assert!(Arc::strong_count(&c) == 2);
drop(a);
drop(c);
drop(d);
}
#[test]
fn show_arc() {
let a = Arc::new(5);
assert_eq!(format!("{:?}", a), "5");
}
// Make sure deriving works with Arc<T>
#[derive(Eq, Ord, PartialEq, PartialOrd, Clone, Debug, Default)]
struct Foo {
inner: Arc<i32>,
}
#[test]
fn test_unsized() {
let x: Arc<[i32]> = Arc::new([1, 2, 3]);
assert_eq!(format!("{:?}", x), "[1, 2, 3]");
let y = Arc::downgrade(&x.clone());
drop(x);
assert!(y.upgrade().is_none());
}
#[test]
fn test_from_owned() {
let foo = 123;
let foo_arc = Arc::from(foo);
assert!(123 == *foo_arc);
}
#[test]
fn test_new_weak() {
let foo: Weak<usize> = Weak::new();
assert!(foo.upgrade().is_none());
}
#[test]
fn test_ptr_eq() {
let five = Arc::new(5);
let same_five = five.clone();
let other_five = Arc::new(5);
assert!(Arc::ptr_eq(&five, &same_five));
assert!(!Arc::ptr_eq(&five, &other_five));
}
#[test]
#[cfg_attr(target_os = "emscripten", ignore)]
fn test_weak_count_locked() {
let mut a = Arc::new(atomic::AtomicBool::new(false));
let a2 = a.clone();
let t = thread::spawn(move || {
// Miri is too slow
let count = if cfg!(miri) { 1000 } else { 1000000 };
for _i in 0..count {
Arc::get_mut(&mut a);
}
a.store(true, SeqCst);
});
while !a2.load(SeqCst) {
let n = Arc::weak_count(&a2);
assert!(n < 2, "bad weak count: {}", n);
#[cfg(miri)] // Miri's scheduler does not guarantee liveness, and thus needs this hint.
atomic::spin_loop_hint();
}
t.join().unwrap();
}
#[test]
fn test_from_str() {
let r: Arc<str> = Arc::from("foo");
assert_eq!(&r[..], "foo");
}
#[test]
fn test_copy_from_slice() {
let s: &[u32] = &[1, 2, 3];
let r: Arc<[u32]> = Arc::from(s);
assert_eq!(&r[..], [1, 2, 3]);
}
#[test]
fn test_clone_from_slice() {
#[derive(Clone, Debug, Eq, PartialEq)]
struct X(u32);
let s: &[X] = &[X(1), X(2), X(3)];
let r: Arc<[X]> = Arc::from(s);
assert_eq!(&r[..], s);
}
#[test]
#[should_panic]
fn test_clone_from_slice_panic() {
use std::string::{String, ToString};
struct Fail(u32, String);
impl Clone for Fail {
fn clone(&self) -> Fail {
if self.0 == 2 {
panic!();
}
Fail(self.0, self.1.clone())
}
}
let s: &[Fail] =
&[Fail(0, "foo".to_string()), Fail(1, "bar".to_string()), Fail(2, "baz".to_string())];
// Should panic, but not cause memory corruption
let _r: Arc<[Fail]> = Arc::from(s);
}
#[test]
fn test_from_box() {
let b: Box<u32> = box 123;
let r: Arc<u32> = Arc::from(b);
assert_eq!(*r, 123);
}
#[test]
fn test_from_box_str() {
use std::string::String;
let s = String::from("foo").into_boxed_str();
let r: Arc<str> = Arc::from(s);
assert_eq!(&r[..], "foo");
}
#[test]
fn test_from_box_slice() {
let s = vec![1, 2, 3].into_boxed_slice();
let r: Arc<[u32]> = Arc::from(s);
assert_eq!(&r[..], [1, 2, 3]);
}
#[test]
fn test_from_box_trait() {
use std::fmt::Display;
use std::string::ToString;
let b: Box<dyn Display> = box 123;
let r: Arc<dyn Display> = Arc::from(b);
assert_eq!(r.to_string(), "123");
}
#[test]
fn test_from_box_trait_zero_sized() {
use std::fmt::Debug;
let b: Box<dyn Debug> = box ();
let r: Arc<dyn Debug> = Arc::from(b);
assert_eq!(format!("{:?}", r), "()");
}
#[test]
fn test_from_vec() {
let v = vec![1, 2, 3];
let r: Arc<[u32]> = Arc::from(v);
assert_eq!(&r[..], [1, 2, 3]);
}
#[test]
fn test_downcast() {
use std::any::Any;
let r1: Arc<dyn Any + Send + Sync> = Arc::new(i32::MAX);
let r2: Arc<dyn Any + Send + Sync> = Arc::new("abc");
assert!(r1.clone().downcast::<u32>().is_err());
let r1i32 = r1.downcast::<i32>();
assert!(r1i32.is_ok());
assert_eq!(r1i32.unwrap(), Arc::new(i32::MAX));
assert!(r2.clone().downcast::<i32>().is_err());
let r2str = r2.downcast::<&'static str>();
assert!(r2str.is_ok());
assert_eq!(r2str.unwrap(), Arc::new("abc"));
}
#[test]
fn test_array_from_slice() {
let v = vec![1, 2, 3];
let r: Arc<[u32]> = Arc::from(v);
let a: Result<Arc<[u32; 3]>, _> = r.clone().try_into();
assert!(a.is_ok());
let a: Result<Arc<[u32; 2]>, _> = r.clone().try_into();
assert!(a.is_err());
}
#[test]
fn test_arc_cyclic_with_zero_refs() {
struct ZeroRefs {
inner: Weak<ZeroRefs>,
}
let zero_refs = Arc::new_cyclic(|inner| {
assert_eq!(inner.strong_count(), 0);
assert!(inner.upgrade().is_none());
ZeroRefs { inner: Weak::new() }
});
assert_eq!(Arc::strong_count(&zero_refs), 1);
assert_eq!(Arc::weak_count(&zero_refs), 0);
assert_eq!(zero_refs.inner.strong_count(), 0);
assert_eq!(zero_refs.inner.weak_count(), 0);
}
#[test]
fn test_arc_new_cyclic_one_ref() {
struct OneRef {
inner: Weak<OneRef>,
}
let one_ref = Arc::new_cyclic(|inner| {
assert_eq!(inner.strong_count(), 0);
assert!(inner.upgrade().is_none());
OneRef { inner: inner.clone() }
});
assert_eq!(Arc::strong_count(&one_ref), 1);
assert_eq!(Arc::weak_count(&one_ref), 1);
let one_ref2 = Weak::upgrade(&one_ref.inner).unwrap();
assert!(Arc::ptr_eq(&one_ref, &one_ref2));
assert_eq!(Arc::strong_count(&one_ref), 2);
assert_eq!(Arc::weak_count(&one_ref), 1);
}
#[test]
fn test_arc_cyclic_two_refs() {
struct TwoRefs {
inner1: Weak<TwoRefs>,
inner2: Weak<TwoRefs>,
}
let two_refs = Arc::new_cyclic(|inner| {
assert_eq!(inner.strong_count(), 0);
assert!(inner.upgrade().is_none());
let inner1 = inner.clone();
let inner2 = inner1.clone();
TwoRefs { inner1, inner2 }
});
assert_eq!(Arc::strong_count(&two_refs), 1);
assert_eq!(Arc::weak_count(&two_refs), 2);
let two_refs1 = Weak::upgrade(&two_refs.inner1).unwrap();
assert!(Arc::ptr_eq(&two_refs, &two_refs1));
let two_refs2 = Weak::upgrade(&two_refs.inner2).unwrap();
assert!(Arc::ptr_eq(&two_refs, &two_refs2));
assert_eq!(Arc::strong_count(&two_refs), 3);
assert_eq!(Arc::weak_count(&two_refs), 2);
}


@ -13,11 +13,9 @@ use crate::sync::Arc;
///
/// This trait is a memory-safe and ergonomic alternative to constructing a
/// [`RawWaker`]. It supports the common executor design in which the data used
/// to wake up a task is stored in an [`Arc`][arc]. Some executors (especially
/// to wake up a task is stored in an [`Arc`]. Some executors (especially
/// those for embedded systems) cannot use this API, which is why [`RawWaker`]
/// exists as an alternative for those systems.
///
/// [arc]: ../../std/sync/struct.Arc.html
#[unstable(feature = "wake_trait", issue = "69912")]
pub trait Wake {
/// Wake this task.
@ -69,14 +67,13 @@ fn raw_waker<W: Wake + Send + Sync + 'static>(waker: Arc<W>) -> RawWaker {
// Wake by value, moving the Arc into the Wake::wake function
unsafe fn wake<W: Wake + Send + Sync + 'static>(waker: *const ()) {
let waker: Arc<W> = unsafe { Arc::from_raw(waker as *const W) };
let waker = unsafe { Arc::from_raw(waker as *const W) };
<W as Wake>::wake(waker);
}
// Wake by reference, wrap the waker in ManuallyDrop to avoid dropping it
unsafe fn wake_by_ref<W: Wake + Send + Sync + 'static>(waker: *const ()) {
let waker: ManuallyDrop<Arc<W>> =
unsafe { ManuallyDrop::new(Arc::from_raw(waker as *const W)) };
let waker = unsafe { ManuallyDrop::new(Arc::from_raw(waker as *const W)) };
<W as Wake>::wake_by_ref(&waker);
}
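A hedged sketch of the executor design this trait supports, assuming the `From<Arc<W>> for Waker` impl that accompanies these vtable shims (the `Flag` type is invented for illustration; `wake_trait` is unstable in this snapshot):

```rust
#![feature(wake_trait)]

use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::task::{Wake, Waker};

/// A trivial wake signal: waking just sets a flag.
struct Flag(AtomicBool);

impl Wake for Flag {
    fn wake(self: Arc<Self>) {
        self.0.store(true, Ordering::SeqCst);
    }
}

fn main() {
    let flag = Arc::new(Flag(AtomicBool::new(false)));
    // Converting the Arc builds the `RawWaker` vtable (the `wake` and
    // `wake_by_ref` shims above) behind a safe interface.
    let waker = Waker::from(flag.clone());
    waker.wake_by_ref();
    assert!(flag.0.load(Ordering::SeqCst));
}
```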

library/alloc/src/vec.rs: new file (3,139 lines); diff suppressed because it is too large.

@ -0,0 +1,51 @@
use std::mem::MaybeUninit;
use std::ptr::NonNull;
#[test]
fn unitialized_zero_size_box() {
assert_eq!(
&*Box::<()>::new_uninit() as *const _,
NonNull::<MaybeUninit<()>>::dangling().as_ptr(),
);
assert_eq!(
Box::<[()]>::new_uninit_slice(4).as_ptr(),
NonNull::<MaybeUninit<()>>::dangling().as_ptr(),
);
assert_eq!(
Box::<[String]>::new_uninit_slice(0).as_ptr(),
NonNull::<MaybeUninit<String>>::dangling().as_ptr(),
);
}
#[derive(Clone, PartialEq, Eq, Debug)]
struct Dummy {
_data: u8,
}
#[test]
fn box_clone_and_clone_from_equivalence() {
for size in (0..8).map(|i| 2usize.pow(i)) {
let control = vec![Dummy { _data: 42 }; size].into_boxed_slice();
let clone = control.clone();
let mut copy = vec![Dummy { _data: 84 }; size].into_boxed_slice();
copy.clone_from(&control);
assert_eq!(control, clone);
assert_eq!(control, copy);
}
}
/// This test might give a false positive if the box reallocates but the allocator keeps the
/// original pointer.
///
/// On the other hand it won't give a false negative: if it fails, then the memory was
/// definitely not reused.
#[test]
fn box_clone_from_ptr_stability() {
for size in (0..8).map(|i| 2usize.pow(i)) {
let control = vec![Dummy { _data: 42 }; size].into_boxed_slice();
let mut copy = vec![Dummy { _data: 84 }; size].into_boxed_slice();
let copy_raw = copy.as_ptr() as usize;
copy.clone_from(&control);
assert_eq!(copy.as_ptr() as usize, copy_raw);
}
}


@ -0,0 +1,19 @@
use std::collections::BTreeSet;
#[test]
fn test_hash() {
use crate::hash;
let mut x = BTreeSet::new();
let mut y = BTreeSet::new();
x.insert(1);
x.insert(2);
x.insert(3);
y.insert(3);
y.insert(2);
y.insert(1);
assert_eq!(hash(&x), hash(&y));
}


@ -0,0 +1,44 @@
use std::alloc::{AllocRef, Global, Layout, System};
/// Issue #45955 and #62251.
#[test]
fn alloc_system_overaligned_request() {
check_overalign_requests(System)
}
#[test]
fn std_heap_overaligned_request() {
check_overalign_requests(Global)
}
fn check_overalign_requests<T: AllocRef>(mut allocator: T) {
for &align in &[4, 8, 16, 32] {
// less than and bigger than `MIN_ALIGN`
for &size in &[align / 2, align - 1] {
// size less than alignment
let iterations = 128;
unsafe {
let pointers: Vec<_> = (0..iterations)
.map(|_| {
allocator.alloc(Layout::from_size_align(size, align).unwrap()).unwrap()
})
.collect();
for &ptr in &pointers {
assert_eq!(
(ptr.as_non_null_ptr().as_ptr() as usize) % align,
0,
"Got a pointer less aligned than requested"
)
}
// Clean up
for &ptr in &pointers {
allocator.dealloc(
ptr.as_non_null_ptr(),
Layout::from_size_align(size, align).unwrap(),
)
}
}
}
}
}


@ -0,0 +1,57 @@
#![feature(allocator_api)]
#![feature(box_syntax)]
#![feature(drain_filter)]
#![feature(exact_size_is_empty)]
#![feature(new_uninit)]
#![feature(pattern)]
#![feature(str_split_once)]
#![feature(trusted_len)]
#![feature(try_reserve)]
#![feature(unboxed_closures)]
#![feature(associated_type_bounds)]
#![feature(binary_heap_into_iter_sorted)]
#![feature(binary_heap_drain_sorted)]
#![feature(slice_ptr_get)]
#![feature(split_inclusive)]
#![feature(binary_heap_retain)]
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
mod arc;
mod binary_heap;
mod borrow;
mod boxed;
mod btree_set_hash;
mod cow_str;
mod fmt;
mod heap;
mod linked_list;
mod rc;
mod slice;
mod str;
mod string;
mod vec;
mod vec_deque;
fn hash<T: Hash>(t: &T) -> u64 {
let mut s = DefaultHasher::new();
t.hash(&mut s);
s.finish()
}
// FIXME: Instantiated functions with i128 in the signature is not supported in Emscripten.
// See https://github.com/kripken/emscripten-fastcomp/issues/169
#[cfg(not(target_os = "emscripten"))]
#[test]
fn test_boxed_hasher() {
let ordinary_hash = hash(&5u32);
let mut hasher_1 = Box::new(DefaultHasher::new());
5u32.hash(&mut hasher_1);
assert_eq!(ordinary_hash, hasher_1.finish());
let mut hasher_2 = Box::new(DefaultHasher::new()) as Box<dyn Hasher>;
5u32.hash(&mut hasher_2);
assert_eq!(ordinary_hash, hasher_2.finish());
}

library/alloc/tests/slice.rs: new file (1,771 lines); diff suppressed because it is too large.
library/alloc/tests/str.rs: new file (1,923 lines); diff suppressed because it is too large.

@ -0,0 +1,194 @@
name: CI
on:
push:
branches:
- master
pull_request:
branches:
- master
jobs:
test:
name: Test
runs-on: ${{ matrix.os }}
strategy:
matrix:
thing: [stable, beta, nightly, macos, windows-msvc64, windows-msvc32, windows-gnu64, windows-gnu32]
include:
- thing: stable
os: ubuntu-latest
rust: stable
- thing: beta
os: ubuntu-latest
rust: beta
- thing: nightly
os: ubuntu-latest
rust: nightly
- thing: macos
os: macos-latest
rust: stable
# Note that these are on nightly due to rust-lang/rust#63700 not being
# on stable yet
- thing: windows-msvc64
os: windows-latest
rust: stable-x86_64-msvc
- thing: windows-msvc32
os: windows-latest
rust: stable-i686-msvc
- thing: windows-gnu64
os: windows-latest
rust: stable-x86_64-gnu
- thing: windows-gnu32
os: windows-latest
rust: stable-i686-gnu
steps:
- uses: actions/checkout@v1
with:
submodules: true
- name: Install Rust (rustup)
run: rustup update ${{ matrix.rust }} --no-self-update && rustup default ${{ matrix.rust }}
shell: bash
# full fidelity of backtraces on 32-bit msvc requires frame pointers, so
# enable that for our tests
- name: Force frame pointers
run: echo "##[set-env name=RUSTFLAGS]-Cforce-frame-pointers"
shell: bash
if: matrix.thing == 'windows-msvc32'
- run: cargo build --manifest-path crates/backtrace-sys/Cargo.toml
- run: cargo build
- run: cargo test
- run: cargo test --features "gimli-symbolize"
- run: cargo test --features "libbacktrace"
- run: cargo test --features "libbacktrace gimli-symbolize"
- run: cargo test --features "serialize-rustc"
- run: cargo test --features "serialize-serde"
- run: cargo test --features "verify-winapi"
- run: cargo test --features "cpp_demangle"
- run: cargo test --no-default-features
- run: cargo test --no-default-features --features "libbacktrace"
- run: cargo test --no-default-features --features "gimli-symbolize"
- run: cargo test --no-default-features --features "gimli-symbolize libbacktrace"
- run: cargo test --no-default-features --features "libbacktrace std"
- run: cargo test --no-default-features --features "gimli-symbolize std"
- run: cargo test --no-default-features --features "std"
- run: cargo test --manifest-path crates/cpp_smoke_test/Cargo.toml
- run: cargo test --manifest-path crates/macos_frames_test/Cargo.toml
- run: cargo test --features libbacktrace --manifest-path crates/without_debuginfo/Cargo.toml
- run: cargo test --features gimli-symbolize --manifest-path crates/without_debuginfo/Cargo.toml
- run: cargo test --manifest-path crates/line-tables-only/Cargo.toml --features libbacktrace
- run: cargo test --manifest-path crates/line-tables-only/Cargo.toml --features gimli-symbolize
- run: RUSTFLAGS="-C link-arg=-Wl,--compress-debug-sections=zlib-gabi" cargo test --features gimli-symbolize
if: contains(matrix.os, 'ubuntu')
- run: RUSTFLAGS="-C link-arg=-Wl,--compress-debug-sections=zlib-gnu" cargo test --features gimli-symbolize
if: contains(matrix.os, 'ubuntu')
- run: cargo build --manifest-path crates/as-if-std/Cargo.toml
windows_arm64:
name: Windows AArch64
runs-on: windows-latest
steps:
- uses: actions/checkout@v1
with:
submodules: true
- name: Install Rust
run: rustup update stable --no-self-update && rustup default stable
shell: bash
- run: rustup target add aarch64-pc-windows-msvc
- run: cargo test --no-run --target aarch64-pc-windows-msvc
- run: cargo test --no-run --target aarch64-pc-windows-msvc --features verify-winapi
ios:
name: iOS
runs-on: macos-latest
strategy:
matrix:
target:
- aarch64-apple-ios
- x86_64-apple-ios
include:
- target: aarch64-apple-ios
sdk: iphoneos
- target: x86_64-apple-ios
sdk: iphonesimulator
steps:
- uses: actions/checkout@v1
with:
submodules: true
- name: Install Rust
run: |
curl https://sh.rustup.rs | sh -s -- -y
echo "##[add-path]$HOME/.cargo/bin"
- run: rustup target add ${{ matrix.target }}
- run: |
export SDK_PATH=`xcrun --show-sdk-path --sdk ${{ matrix.sdk }}`
export RUSTFLAGS="-C link-arg=-isysroot -C link-arg=$SDK_PATH"
cargo test --no-run --target ${{ matrix.target }}
name: Build tests
docker:
name: Docker
runs-on: ubuntu-latest
strategy:
matrix:
target:
- aarch64-unknown-linux-gnu
- arm-unknown-linux-gnueabihf
- armv7-unknown-linux-gnueabihf
- i586-unknown-linux-gnu
- i686-unknown-linux-gnu
- powerpc64-unknown-linux-gnu
- x86_64-pc-windows-gnu
- x86_64-unknown-linux-gnu
- x86_64-unknown-linux-musl
- arm-linux-androideabi
- armv7-linux-androideabi
- aarch64-linux-android
- i686-linux-android
- x86_64-linux-android
steps:
- uses: actions/checkout@v1
with:
submodules: true
- name: Install Rust
run: rustup update stable && rustup default stable
- run: rustup target add ${{ matrix.target }}
- run: cargo generate-lockfile
- run: ./ci/run-docker.sh ${{ matrix.target }}
rustfmt:
name: Rustfmt
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v1
with:
submodules: true
- name: Install Rust
run: rustup update stable && rustup default stable && rustup component add rustfmt
- run: cargo fmt --all -- --check
build:
name: Build Targets
runs-on: ubuntu-latest
strategy:
matrix:
target: [wasm32-unknown-unknown, wasm32-wasi, x86_64-fuchsia, x86_64-fortanix-unknown-sgx]
steps:
- uses: actions/checkout@v1
with:
submodules: true
- name: Install Rust
run: rustup update nightly && rustup default nightly
- run: rustup target add ${{ matrix.target }}
- run: cargo build --target ${{ matrix.target }}
msrv:
name: MSRV
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v1
with:
submodules: true
- name: Install Rust
run: rustup update 1.40.0 && rustup default 1.40.0
- run: cargo build


@ -0,0 +1,135 @@
[package]
name = "backtrace"
version = "0.3.50"
authors = ["The Rust Project Developers"]
license = "MIT/Apache-2.0"
readme = "README.md"
repository = "https://github.com/rust-lang/backtrace-rs"
homepage = "https://github.com/rust-lang/backtrace-rs"
documentation = "https://docs.rs/backtrace"
description = """
A library to acquire a stack trace (backtrace) at runtime in a Rust program.
"""
autoexamples = true
autotests = true
edition = "2018"
[workspace]
members = ['crates/cpp_smoke_test', 'crates/as-if-std']
exclude = ['crates/without_debuginfo', 'crates/macos_frames_test', 'crates/line-tables-only']
[dependencies]
cfg-if = "0.1.10"
rustc-demangle = "0.1.4"
backtrace-sys = { path = "crates/backtrace-sys", version = "0.1.35", optional = true, default_features = false }
libc = { version = "0.2.45", default-features = false }
# Optionally enable the ability to serialize a `Backtrace`, controlled through
# the `serialize-*` features below.
serde = { version = "1.0", optional = true, features = ['derive'] }
rustc-serialize = { version = "0.3", optional = true }
# Optionally demangle C++ frames' symbols in backtraces.
cpp_demangle = { default-features = false, version = "0.3.0", optional = true }
# Optional dependencies enabled through the `gimli-symbolize` feature, do not
# use these features directly.
addr2line = { version = "0.13.0", optional = true, default-features = false }
miniz_oxide = { version = "0.4.0", optional = true, default-features = false }
[dependencies.object]
version = "0.20.0"
optional = true
default-features = false
features = ['read_core', 'elf', 'macho', 'pe', 'unaligned']
[target.'cfg(windows)'.dependencies]
winapi = { version = "0.3.3", optional = true }
[dev-dependencies]
dylib-dep = { path = "crates/dylib-dep" }
libloading = "0.6"
[features]
# By default libstd support and gimli-symbolize is used to symbolize addresses.
default = ["std", "gimli-symbolize"]
# Include std support. This enables types like `Backtrace`.
std = []
#=======================================
# Methods of resolving symbols
#
# - gimli-symbolize: use the `gimli-rs/addr2line` crate to symbolicate
# addresses into file, line, and name using DWARF debug information.
# - libbacktrace: this feature activates the `backtrace-sys` dependency,
# building the libbacktrace library found in gcc repos.
#
# Note that MSVC unconditionally uses the dbghelp library to symbolize and won't
# be affected by feature selection here. Also note that it's highly unlikely you
# want to configure this. If you're having trouble getting backtraces it's
# likely best to open an issue.
gimli-symbolize = ["addr2line", "miniz_oxide", "object"]
libbacktrace = ["backtrace-sys/backtrace-sys"]
#=======================================
# Methods of serialization
#
# Various features used for enabling rustc-serialize or syntex codegen.
serialize-rustc = ["rustc-serialize"]
serialize-serde = ["serde"]
#=======================================
# Deprecated/internal features
#
# Only here for backwards compatibility purposes or for internal testing
# purposes. New code should use none of these features.
coresymbolication = []
dladdr = []
kernel32 = []
unix-backtrace = []
libunwind = []
dbghelp = []
verify-winapi = [
'winapi/dbghelp',
'winapi/handleapi',
'winapi/libloaderapi',
'winapi/memoryapi',
'winapi/minwindef',
'winapi/processthreadsapi',
'winapi/synchapi',
'winapi/tlhelp32',
'winapi/winbase',
'winapi/winnt',
]
[[example]]
name = "backtrace"
required-features = ["std"]
[[example]]
name = "raw"
required-features = ["std"]
[[test]]
name = "skip_inner_frames"
required-features = ["std"]
[[test]]
name = "long_fn_name"
required-features = ["std"]
[[test]]
name = "smoke"
required-features = ["std"]
edition = '2018'
[[test]]
name = "accuracy"
required-features = ["std", "gimli-symbolize"]
edition = '2018'
[[test]]
name = "concurrent-panics"
required-features = ["std"]
harness = false

library/backtrace/ci/run.sh: new executable file (6 lines).

@ -0,0 +1,6 @@
#!/bin/sh
set -ex
cargo test --target $TARGET
cargo build --target $TARGET --manifest-path crates/as-if-std/Cargo.toml


@ -0,0 +1,28 @@
[package]
name = "as-if-std"
version = "0.1.0"
authors = ["Alex Crichton <alex@alexcrichton.com>"]
edition = "2018"
publish = false
[lib]
test = false
doc = false
doctest = false
bench = false
[dependencies]
cfg-if = "0.1.10"
rustc-demangle = "0.1.4"
libc = { version = "0.2.45", default-features = false }
addr2line = { version = "0.12.0", default-features = false }
miniz_oxide = { version = "0.4.0", default-features = false }
[dependencies.object]
version = "0.20.0"
default-features = false
features = ['read_core', 'elf', 'macho', 'pe', 'unaligned']
[features]
default = ['gimli-symbolize']
gimli-symbolize = []


@ -0,0 +1,3 @@
fn main() {
println!("cargo:rustc-cfg=backtrace_in_libstd");
}


@ -0,0 +1,21 @@
// A crate which builds the `backtrace` crate as-if it's included as a
// submodule into the standard library. We try to set this crate up similarly
// to the standard library itself to minimize the likelihood of issues when
// updating the `backtrace` crate.
#![no_std]
extern crate alloc;
// We want to `pub use std::*` in the root but we don't want `std` available in
// the root namespace, so do this in a funky inner module.
mod __internal {
extern crate std;
pub use std::*;
}
pub use __internal::*;
// This is the magical part which we hope works.
#[path = "../../../src/lib.rs"]
mod the_backtrace_crate;


@ -0,0 +1,27 @@
[package]
name = "backtrace-sys"
version = "0.1.37"
authors = ["Alex Crichton <alex@alexcrichton.com>"]
build = "build.rs"
license = "MIT/Apache-2.0"
repository = "https://github.com/alexcrichton/backtrace-rs"
homepage = "https://github.com/alexcrichton/backtrace-rs"
documentation = "http://alexcrichton.com/backtrace-rs"
description = """
Bindings to the libbacktrace gcc library
"""
[dependencies]
libc = { version = "0.2", default-features = false }
core = { version = '1.0.0', optional = true, package = 'rustc-std-workspace-core' }
compiler_builtins = { version = '0.1.2', optional = true }
[build-dependencies]
cc = "1.0.37"
[features]
default = ["backtrace-sys"]
# Without this feature, this crate does nothing.
backtrace-sys = []
rustc-dep-of-std = ['core', 'compiler_builtins']

Some files were not shown because too many files have changed in this diff.