Auto merge of #106866 - matthiaskrgr:rollup-r063s44, r=matthiaskrgr
Rollup of 8 pull requests

Successful merges:

 - #105526 (libcore: make result of iter::from_generator Clone)
 - #106563 (Fix `unused_braces` on generic const expr macro call)
 - #106661 (Stop probing for statx unless necessary)
 - #106820 (Deprioritize fulfillment errors that come from expansions.)
 - #106828 (rustdoc: remove `docblock` class from notable trait popover)
 - #106849 (Allocate one less vec while parsing arrays)
 - #106855 (rustdoc: few small cleanups)
 - #106860 (Remove various double spaces in the libraries.)

Failed merges:

r? `@ghost`
`@rustbot` modify labels: rollup
commit afaf3e07aa
61 changed files with 221 additions and 161 deletions
@@ -1105,6 +1105,7 @@ impl UnusedDelimLint for UnusedBraces {
|| matches!(expr.kind, ast::ExprKind::Lit(_)))
&& !cx.sess().source_map().is_multiline(value.span)
&& value.attrs.is_empty()
+ && !expr.span.from_expansion()
&& !value.span.from_expansion()
&& !inner.span.from_expansion()
{
@@ -1475,9 +1475,8 @@ impl<'a> Parser<'a> {
} else if self.eat(&token::Comma) {
// Vector with two or more elements.
let sep = SeqSep::trailing_allowed(token::Comma);
- let (remaining_exprs, _) = self.parse_seq_to_end(close, sep, |p| p.parse_expr())?;
- let mut exprs = vec![first_expr];
- exprs.extend(remaining_exprs);
+ let (mut exprs, _) = self.parse_seq_to_end(close, sep, |p| p.parse_expr())?;
+ exprs.insert(0, first_expr);
ExprKind::Array(exprs)
} else {
// Vector with one element
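The hunk above (#106849) drops the temporary vector: the remaining elements are parsed straight into one Vec and the first element is spliced in at the front. A minimal standalone sketch of the same idea, with a hypothetical parse_rest closure standing in for rustc's parse_seq_to_end:

    // Sketch only: `parse_rest` stands in for rustc's `parse_seq_to_end`.
    fn collect_array(first: i32, parse_rest: impl FnOnce() -> Vec<i32>) -> Vec<i32> {
        // Before: the remaining elements were collected into one Vec and then
        // copied into a second Vec that started with `first`.
        // After: reuse the Vec returned by the parser and splice the first
        // element in at index 0 (an O(n) shift, but no second allocation).
        let mut exprs = parse_rest();
        exprs.insert(0, first);
        exprs
    }

    fn main() {
        let v = collect_array(1, || vec![2, 3, 4]);
        assert_eq!(v, [1, 2, 3, 4]);
    }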
@@ -454,9 +454,11 @@ impl<'tcx> TypeErrCtxtExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
}
}

- for (error, suppressed) in iter::zip(errors, is_suppressed) {
- if !suppressed {
- self.report_fulfillment_error(error, body_id);
+ for from_expansion in [false, true] {
+ for (error, suppressed) in iter::zip(errors, &is_suppressed) {
+ if !suppressed && error.obligation.cause.span.from_expansion() == from_expansion {
+ self.report_fulfillment_error(error, body_id);
+ }
}
}
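The rewritten loop (#106820) reports diagnostics in two passes keyed on whether an error's span comes from a macro expansion, so expansion-derived errors are printed last. A small self-contained sketch of that ordering; the Error type and its fields here are illustrative, not rustc's real types:

    struct Error {
        msg: &'static str,
        from_expansion: bool, // rustc reads this off `obligation.cause.span.from_expansion()`
    }

    fn report_all(errors: &[Error]) {
        // Two passes: non-expansion errors first, expansion-derived errors last,
        // so the most actionable diagnostics appear before the noisy ones.
        for from_expansion in [false, true] {
            for error in errors.iter().filter(|e| e.from_expansion == from_expansion) {
                println!("error: {}", error.msg);
            }
        }
    }

    fn main() {
        report_all(&[
            Error { msg: "inside println! expansion", from_expansion: true },
            Error { msg: "in user-written code", from_expansion: false },
        ]);
    }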
@@ -20,7 +20,7 @@ use core::marker::Destruct;
mod tests;

extern "Rust" {
- // These are the magic symbols to call the global allocator. rustc generates
+ // These are the magic symbols to call the global allocator. rustc generates
// them to call `__rg_alloc` etc. if there is a `#[global_allocator]` attribute
// (the code expanding that attribute macro generates those functions), or to call
// the default implementations in std (`__rdl_alloc` etc. in `library/std/src/alloc.rs`)
@@ -353,7 +353,7 @@ pub(crate) const unsafe fn box_free<T: ?Sized, A: ~const Allocator + ~const Dest

#[cfg(not(no_global_oom_handling))]
extern "Rust" {
- // This is the magic symbol to call the global alloc error handler. rustc generates
+ // This is the magic symbol to call the global alloc error handler. rustc generates
// it to call `__rg_oom` if there is a `#[alloc_error_handler]`, or to call the
// default implementations below (`__rdl_oom`) otherwise.
fn __rust_alloc_error_handler(size: usize, align: usize) -> !;
@@ -2179,7 +2179,7 @@ pub struct Weak<T: ?Sized> {
// This is a `NonNull` to allow optimizing the size of this type in enums,
// but it is not necessarily a valid pointer.
// `Weak::new` sets this to `usize::MAX` so that it doesn’t need
- // to allocate space on the heap. That's not a value a real pointer
+ // to allocate space on the heap. That's not a value a real pointer
// will ever have because RcBox has alignment at least 2.
// This is only possible when `T: Sized`; unsized `T` never dangle.
ptr: NonNull<RcBox<T>>,
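As background for the comment above: `Weak::new()` creates a dangling weak reference without allocating, and upgrading it simply yields `None`. A short demonstration using the stable `Rc` API:

    use std::rc::{Rc, Weak};

    fn main() {
        // `Weak::new()` does not allocate; internally the pointer is a sentinel
        // (usize::MAX), which can never collide with a real RcBox address.
        let dangling: Weak<i32> = Weak::new();
        assert!(dangling.upgrade().is_none());

        // A Weak obtained from a live Rc can be upgraded while the Rc is alive.
        let strong = Rc::new(5);
        let weak = Rc::downgrade(&strong);
        assert_eq!(weak.upgrade().as_deref(), Some(&5));

        drop(strong);
        assert!(weak.upgrade().is_none());
    }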
@@ -295,7 +295,7 @@ pub struct Weak<T: ?Sized> {
// This is a `NonNull` to allow optimizing the size of this type in enums,
// but it is not necessarily a valid pointer.
// `Weak::new` sets this to `usize::MAX` so that it doesn’t need
- // to allocate space on the heap. That's not a value a real pointer
+ // to allocate space on the heap. That's not a value a real pointer
// will ever have because RcBox has alignment at least 2.
// This is only possible when `T: Sized`; unsized `T` never dangle.
ptr: NonNull<ArcInner<T>>,
@@ -1656,7 +1656,7 @@ impl<T: ?Sized> Arc<T> {
//
// The acquire label here ensures a happens-before relationship with any
// writes to `strong` (in particular in `Weak::upgrade`) prior to decrements
- // of the `weak` count (via `Weak::drop`, which uses release). If the upgraded
+ // of the `weak` count (via `Weak::drop`, which uses release). If the upgraded
// weak ref was never dropped, the CAS here will fail so we do not care to synchronize.
if self.inner().weak.compare_exchange(1, usize::MAX, Acquire, Relaxed).is_ok() {
// This needs to be an `Acquire` to synchronize with the decrement of the `strong`
@@ -1712,7 +1712,7 @@ unsafe impl<#[may_dangle] T: ?Sized> Drop for Arc<T> {
}

// This fence is needed to prevent reordering of use of the data and
- // deletion of the data. Because it is marked `Release`, the decreasing
+ // deletion of the data. Because it is marked `Release`, the decreasing
// of the reference count synchronizes with this `Acquire` fence. This
// means that use of the data happens before decreasing the reference
// count, which happens before this fence, which happens before the
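The fence described above is the usual shape for reference-counted destruction: decrement with `Release`, and the thread that observes the count reaching zero runs an `Acquire` fence before freeing the data. A minimal sketch of that pattern, assuming a simplified `Inner` type; it is not Arc's real implementation (which also handles the weak count):

    use std::sync::atomic::{fence, AtomicUsize, Ordering::{Acquire, Release}};

    struct Inner<T> {
        strong: AtomicUsize,
        data: T,
    }

    // Returns true when the caller held the last reference and may destroy `data`.
    fn release_ref<T>(inner: &Inner<T>) -> bool {
        // Decrement with Release: all prior uses of `data` happen-before a later
        // Acquire that observes this decrement.
        if inner.strong.fetch_sub(1, Release) != 1 {
            return false;
        }
        // Synchronizes with the Release decrements of the other references, so use
        // of the data cannot be reordered after its deletion.
        fence(Acquire);
        true
    }

    fn main() {
        let inner = Inner { strong: AtomicUsize::new(2), data: 42 };
        assert!(!release_ref(&inner));
        assert!(release_ref(&inner));
        println!("last reference dropped; data = {}", inner.data);
    }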
@@ -2172,7 +2172,7 @@ impl<T: ?Sized> Clone for Weak<T> {
} else {
return Weak { ptr: self.ptr };
};
- // See comments in Arc::clone() for why this is relaxed. This can use a
+ // See comments in Arc::clone() for why this is relaxed. This can use a
// fetch_add (ignoring the lock) because the weak count is only locked
// where are *no other* weak pointers in existence. (So we can't be
// running this code in that case).
@@ -40,7 +40,7 @@ pub struct IntoIter<
// to avoid dropping the allocator twice we need to wrap it into ManuallyDrop
pub(super) alloc: ManuallyDrop<A>,
pub(super) ptr: *const T,
- pub(super) end: *const T, // If T is a ZST, this is actually ptr+len. This encoding is picked so that
+ pub(super) end: *const T, // If T is a ZST, this is actually ptr+len. This encoding is picked so that
// ptr == end is a quick test for the Iterator being empty, that works
// for both ZST and non-ZST.
}
@@ -146,9 +146,9 @@ impl<T, A: Allocator> IntoIter<T, A> {
let mut this = ManuallyDrop::new(self);

// SAFETY: This allocation originally came from a `Vec`, so it passes
- // all those checks. We have `this.buf` ≤ `this.ptr` ≤ `this.end`,
+ // all those checks. We have `this.buf` ≤ `this.ptr` ≤ `this.end`,
// so the `sub_ptr`s below cannot wrap, and will produce a well-formed
- // range. `end` ≤ `buf + cap`, so the range will be in-bounds.
+ // range. `end` ≤ `buf + cap`, so the range will be in-bounds.
// Taking `alloc` is ok because nothing else is going to look at it,
// since our `Drop` impl isn't going to run so there's no more code.
unsafe {
@@ -57,7 +57,7 @@ unsafe impl<T: IsZero, const N: usize> IsZero for [T; N] {
#[inline]
fn is_zero(&self) -> bool {
// Because this is generated as a runtime check, it's not obvious that
- // it's worth doing if the array is really long. The threshold here
+ // it's worth doing if the array is really long. The threshold here
// is largely arbitrary, but was picked because as of 2022-07-01 LLVM
// fails to const-fold the check in `vec![[1; 32]; n]`
// See https://github.com/rust-lang/rust/pull/97581#issuecomment-1166628022
@@ -2429,7 +2429,7 @@ impl<T: Clone, A: Allocator> Vec<T, A> {
self.reserve(range.len());

// SAFETY:
- // - `slice::range` guarantees that the given range is valid for indexing self
+ // - `slice::range` guarantees that the given range is valid for indexing self
unsafe {
self.spec_extend_from_within(range);
}
@@ -2686,7 +2686,7 @@ impl<T: Clone, A: Allocator + Clone> Clone for Vec<T, A> {

// HACK(japaric): with cfg(test) the inherent `[T]::to_vec` method, which is
// required for this method definition, is not available. Instead use the
- // `slice::to_vec` function which is only available with cfg(test)
+ // `slice::to_vec` function which is only available with cfg(test)
// NB see the slice::hack module in slice.rs for more information
#[cfg(test)]
fn clone(&self) -> Self {
@@ -1849,7 +1849,7 @@ fn test_stable_pointers() {
}

// Test that, if we reserved enough space, adding and removing elements does not
- // invalidate references into the vector (such as `v0`). This test also
+ // invalidate references into the vector (such as `v0`). This test also
// runs in Miri, which would detect such problems.
// Note that this test does *not* constitute a stable guarantee that all these functions do not
// reallocate! Only what is explicitly documented at
@@ -109,8 +109,8 @@ impl<T, const N: usize> IntoIter<T, N> {
/// use std::array::IntoIter;
/// use std::mem::MaybeUninit;
///
- /// # // Hi! Thanks for reading the code. This is restricted to `Copy` because
- /// # // otherwise it could leak. A fully-general version this would need a drop
+ /// # // Hi! Thanks for reading the code. This is restricted to `Copy` because
+ /// # // otherwise it could leak. A fully-general version this would need a drop
/// # // guard to handle panics from the iterator, but this works for an example.
/// fn next_chunk<T: Copy, const N: usize>(
/// it: &mut impl Iterator<Item = T>,
@@ -211,7 +211,7 @@ impl<T, const N: usize> IntoIter<T, N> {
let initialized = 0..0;

// SAFETY: We're telling it that none of the elements are initialized,
- // which is trivially true. And ∀N: usize, 0 <= N.
+ // which is trivially true. And ∀N: usize, 0 <= N.
unsafe { Self::new_unchecked(buffer, initialized) }
}
@@ -756,7 +756,7 @@ impl<A: Step> Iterator for ops::Range<A> {
where
Self: TrustedRandomAccessNoCoerce,
{
- // SAFETY: The TrustedRandomAccess contract requires that callers only pass an index
+ // SAFETY: The TrustedRandomAccess contract requires that callers only pass an index
// that is in bounds.
// Additionally Self: TrustedRandomAccess is only implemented for Copy types
// which means even repeated reads of the same index would be safe.
@@ -1,3 +1,4 @@
+ use crate::fmt;
use crate::ops::{Generator, GeneratorState};
use crate::pin::Pin;
@@ -23,14 +24,21 @@ use crate::pin::Pin;
/// ```
#[inline]
#[unstable(feature = "iter_from_generator", issue = "43122", reason = "generators are unstable")]
- pub fn from_generator<G: Generator<Return = ()> + Unpin>(
- generator: G,
- ) -> impl Iterator<Item = G::Yield> {
+ pub fn from_generator<G: Generator<Return = ()> + Unpin>(generator: G) -> FromGenerator<G> {
FromGenerator(generator)
}

- struct FromGenerator<G>(G);
+ /// An iterator over the values yielded by an underlying generator.
+ ///
+ /// This `struct` is created by the [`iter::from_generator()`] function. See its documentation for
+ /// more.
+ ///
+ /// [`iter::from_generator()`]: from_generator
+ #[unstable(feature = "iter_from_generator", issue = "43122", reason = "generators are unstable")]
+ #[derive(Clone)]
+ pub struct FromGenerator<G>(G);

#[unstable(feature = "iter_from_generator", issue = "43122", reason = "generators are unstable")]
impl<G: Generator<Return = ()> + Unpin> Iterator for FromGenerator<G> {
type Item = G::Yield;
@@ -41,3 +49,10 @@ impl<G: Generator<Return = ()> + Unpin> Iterator for FromGenerator<G> {
}
}
}
+
+ #[unstable(feature = "iter_from_generator", issue = "43122", reason = "generators are unstable")]
+ impl<G> fmt::Debug for FromGenerator<G> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("FromGenerator").finish()
+ }
+ }
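Because `FromGenerator` is now a named struct with `#[derive(Clone)]` (#105526), an iterator built from a `Clone` generator can itself be cloned. A hedged, nightly-only sketch; the `generators`, `generator_clone`, and `iter_from_generator` features were all unstable at the time of this commit and may have been renamed since:

    #![feature(generators, generator_clone, iter_from_generator)]

    use std::iter;

    fn main() {
        // A non-capturing generator with no borrows across yield points is
        // Unpin and (with generator_clone enabled) Clone.
        let it = iter::from_generator(|| {
            yield 1;
            yield 2;
            yield 3;
        });

        // Cloning the iterator is the new capability added by this rollup.
        let copy = it.clone();
        assert_eq!(it.collect::<Vec<_>>(), vec![1, 2, 3]);
        assert_eq!(copy.collect::<Vec<_>>(), vec![1, 2, 3]);
    }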
@@ -26,7 +26,7 @@ mod fpu_precision {
/// Developer's Manual (Volume 1).
///
/// The only field which is relevant for the following code is PC, Precision Control. This
- /// field determines the precision of the operations performed by the FPU. It can be set to:
+ /// field determines the precision of the operations performed by the FPU. It can be set to:
/// - 0b00, single precision i.e., 32-bits
/// - 0b10, double precision i.e., 64-bits
/// - 0b11, double extended precision i.e., 80-bits (default state)
@@ -1538,7 +1538,7 @@ macro_rules! int_impl {
///
/// ```
/// #![feature(bigint_helper_methods)]
- /// // Only the most significant word is signed.
+ /// // Only the most significant word is signed.
/// //
#[doc = concat!("// 10 MAX (a = 10 × 2^", stringify!($BITS), " + 2^", stringify!($BITS), " - 1)")]
#[doc = concat!("// + -5 9 (b = -5 × 2^", stringify!($BITS), " + 9)")]
@@ -1646,7 +1646,7 @@ macro_rules! int_impl {
///
/// ```
/// #![feature(bigint_helper_methods)]
- /// // Only the most significant word is signed.
+ /// // Only the most significant word is signed.
/// //
#[doc = concat!("// 6 8 (a = 6 × 2^", stringify!($BITS), " + 8)")]
#[doc = concat!("// - -5 9 (b = -5 × 2^", stringify!($BITS), " + 9)")]
@@ -753,7 +753,7 @@ impl<P: DerefMut> Pin<P> {
impl<'a, T: ?Sized> Pin<&'a T> {
/// Constructs a new pin by mapping the interior value.
///
- /// For example, if you wanted to get a `Pin` of a field of something,
+ /// For example, if you wanted to get a `Pin` of a field of something,
/// you could use this to get access to that field in one line of code.
/// However, there are several gotchas with these "pinning projections";
/// see the [`pin` module] documentation for further details on that topic.
@@ -856,7 +856,7 @@ impl<'a, T: ?Sized> Pin<&'a mut T> {

/// Construct a new pin by mapping the interior value.
///
- /// For example, if you wanted to get a `Pin` of a field of something,
+ /// For example, if you wanted to get a `Pin` of a field of something,
/// you could use this to get access to that field in one line of code.
/// However, there are several gotchas with these "pinning projections";
/// see the [`pin` module] documentation for further details on that topic.
@@ -1701,7 +1701,7 @@ pub(crate) const unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usiz
// offset is not a multiple of `stride`, the input pointer was misaligned and no pointer
// offset will be able to produce a `p` aligned to the specified `a`.
//
- // The naive `-p (mod a)` equation inhibits LLVM's ability to select instructions
+ // The naive `-p (mod a)` equation inhibits LLVM's ability to select instructions
// like `lea`. We compute `(round_up_to_next_alignment(p, a) - p)` instead. This
// redistributes operations around the load-bearing, but pessimizing `and` instruction
// sufficiently for LLVM to be able to utilize the various optimizations it knows about.
@@ -65,7 +65,7 @@ fn size_from_ptr<T>(_: *const T) -> usize {
#[must_use = "iterators are lazy and do nothing unless consumed"]
pub struct Iter<'a, T: 'a> {
ptr: NonNull<T>,
- end: *const T, // If T is a ZST, this is actually ptr+len. This encoding is picked so that
+ end: *const T, // If T is a ZST, this is actually ptr+len. This encoding is picked so that
// ptr == end is a quick test for the Iterator being empty, that works
// for both ZST and non-ZST.
_marker: PhantomData<&'a T>,
@@ -186,7 +186,7 @@ impl<T> AsRef<[T]> for Iter<'_, T> {
#[must_use = "iterators are lazy and do nothing unless consumed"]
pub struct IterMut<'a, T: 'a> {
ptr: NonNull<T>,
- end: *mut T, // If T is a ZST, this is actually ptr+len. This encoding is picked so that
+ end: *mut T, // If T is a ZST, this is actually ptr+len. This encoding is picked so that
// ptr == end is a quick test for the Iterator being empty, that works
// for both ZST and non-ZST.
_marker: PhantomData<&'a mut T>,
@@ -23,7 +23,7 @@ macro_rules! len {
$self.end.addr().wrapping_sub(start.as_ptr().addr())
} else {
// We know that `start <= end`, so can do better than `offset_from`,
- // which needs to deal in signed. By setting appropriate flags here
+ // which needs to deal in signed. By setting appropriate flags here
// we can tell LLVM this, which helps it remove bounds checks.
// SAFETY: By the type invariant, `start <= end`
let diff = unsafe { unchecked_sub($self.end.addr(), start.as_ptr().addr()) };
@@ -703,7 +703,7 @@ impl<T> [T] {

// Because this function is first compiled in isolation,
// this check tells LLVM that the indexing below is
- // in-bounds. Then after inlining -- once the actual
+ // in-bounds. Then after inlining -- once the actual
// lengths of the slices are known -- it's removed.
let (a, b) = (&mut a[..n], &mut b[..n]);
@@ -1248,7 +1248,7 @@ impl<T> [T] {
ArrayChunksMut::new(self)
}

- /// Returns an iterator over overlapping windows of `N` elements of a slice,
+ /// Returns an iterator over overlapping windows of `N` elements of a slice,
/// starting at the beginning of the slice.
///
/// This is the const generic equivalent of [`windows`].
@@ -2476,7 +2476,7 @@ impl<T> [T] {
let mid = left + size / 2;

// SAFETY: the while condition means `size` is strictly positive, so
- // `size/2 < size`. Thus `left + size/2 < left + size`, which
+ // `size/2 < size`. Thus `left + size/2 < left + size`, which
// coupled with the `left + size <= self.len()` invariant means
// we have `left + size/2 < self.len()`, and this is in-bounds.
let cmp = f(unsafe { self.get_unchecked(mid) });
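The SAFETY comment above rests on the invariant `left + size <= self.len()` together with `size > 0`, which gives `left + size / 2 < self.len()`. A safe, standalone loop with the same shape, shown only to make the bound explicit (the real code uses `get_unchecked`):

    // Safe sketch of the loop whose bounds reasoning is documented above.
    fn binary_search_by<T>(
        slice: &[T],
        mut f: impl FnMut(&T) -> std::cmp::Ordering,
    ) -> Result<usize, usize> {
        use std::cmp::Ordering::*;
        let mut left = 0;
        let mut size = slice.len();
        while size > 0 {
            // Invariant: left + size <= slice.len() and size > 0, so
            // mid = left + size / 2 < left + size <= slice.len() is in bounds.
            let mid = left + size / 2;
            match f(&slice[mid]) {
                Less => {
                    left = mid + 1;
                    size -= size / 2 + 1;
                }
                Greater => size /= 2,
                Equal => return Ok(mid),
            }
        }
        Err(left)
    }

    fn main() {
        let xs = [1, 3, 5, 7, 9];
        assert_eq!(binary_search_by(&xs, |x| x.cmp(&7)), Ok(3));
        assert_eq!(binary_search_by(&xs, |x| x.cmp(&4)), Err(2));
    }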
@@ -18,9 +18,9 @@ struct CopyOnDrop<T> {

impl<T> Drop for CopyOnDrop<T> {
fn drop(&mut self) {
- // SAFETY: This is a helper class.
- // Please refer to its usage for correctness.
- // Namely, one must be sure that `src` and `dst` does not overlap as required by `ptr::copy_nonoverlapping`.
+ // SAFETY: This is a helper class.
+ // Please refer to its usage for correctness.
+ // Namely, one must be sure that `src` and `dst` does not overlap as required by `ptr::copy_nonoverlapping`.
unsafe {
ptr::copy_nonoverlapping(self.src, self.dest, 1);
}
@@ -1488,7 +1488,7 @@ mod slice_index {
// optional:
//
// one or more similar inputs for which data[input] succeeds,
- // and the corresponding output as an array. This helps validate
+ // and the corresponding output as an array. This helps validate
// "critical points" where an input range straddles the boundary
// between valid and invalid.
// (such as the input `len..len`, which is just barely valid)
@@ -1512,7 +1512,7 @@ impl FileType {
}

/// Tests whether this file type represents a regular file.
- /// The result is mutually exclusive to the results of
+ /// The result is mutually exclusive to the results of
/// [`is_dir`] and [`is_symlink`]; only zero or one of these
/// tests may pass.
///
@@ -288,8 +288,8 @@ fn test_buffered_reader_seek_underflow_discard_buffer_between_seeks() {
let mut reader = BufReader::with_capacity(5, ErrAfterFirstSeekReader { first_seek: true });
assert_eq!(reader.fill_buf().ok(), Some(&[0, 0, 0, 0, 0][..]));

- // The following seek will require two underlying seeks. The first will
- // succeed but the second will fail. This should still invalidate the
+ // The following seek will require two underlying seeks. The first will
+ // succeed but the second will fail. This should still invalidate the
// buffer.
assert!(reader.seek(SeekFrom::Current(i64::MIN)).is_err());
assert_eq!(reader.buffer().len(), 0);
@@ -100,7 +100,7 @@ impl BorrowedFd<'_> {

// For ESP-IDF, F_DUPFD is used instead, because the CLOEXEC semantics
// will never be supported, as this is a bare metal framework with
- // no capabilities for multi-process execution. While F_DUPFD is also
+ // no capabilities for multi-process execution. While F_DUPFD is also
// not supported yet, it might be (currently it returns ENOSYS).
#[cfg(target_os = "espidf")]
let cmd = libc::F_DUPFD;
@@ -306,11 +306,11 @@ pub mod panic_count {
// and after increase and decrease, but not necessarily during their execution.
//
// Additionally, the top bit of GLOBAL_PANIC_COUNT (GLOBAL_ALWAYS_ABORT_FLAG)
- // records whether panic::always_abort() has been called. This can only be
+ // records whether panic::always_abort() has been called. This can only be
// set, never cleared.
// panic::always_abort() is usually called to prevent memory allocations done by
// the panic handling in the child created by `libc::fork`.
- // Memory allocations performed in a child created with `libc::fork` are undefined
+ // Memory allocations performed in a child created with `libc::fork` are undefined
// behavior in most operating systems.
// Accessing LOCAL_PANIC_COUNT in a child created by `libc::fork` would lead to a memory
// allocation. Only GLOBAL_PANIC_COUNT can be accessed in this situation. This is
@@ -607,7 +607,7 @@ pub struct Components<'a> {

// true if path *physically* has a root separator; for most Windows
// prefixes, it may have a "logical" root separator for the purposes of
- // normalization, e.g., \\server\share == \\server\share\.
+ // normalization, e.g., \\server\share == \\server\share\.
has_physical_root: bool,

// The iterator is double-ended, and these two states keep track of what has
@@ -294,7 +294,7 @@ impl Drop for Thread {
// Terminate and delete the task
// Safety: `self.task` still represents a task we own (because
// this method or `join_inner` is called only once for
- // each `Thread`). The task indicated that it's safe to
+ // each `Thread`). The task indicated that it's safe to
// delete by entering the `FINISHED` state.
unsafe { terminate_and_delete_task(self.task) };
@@ -149,12 +149,13 @@ cfg_has_statx! {{
) -> Option<io::Result<FileAttr>> {
use crate::sync::atomic::{AtomicU8, Ordering};

- // Linux kernel prior to 4.11 or glibc prior to glibc 2.28 don't support `statx`
- // We store the availability in global to avoid unnecessary syscalls.
- // 0: Unknown
- // 1: Not available
- // 2: Available
- static STATX_STATE: AtomicU8 = AtomicU8::new(0);
+ // Linux kernel prior to 4.11 or glibc prior to glibc 2.28 don't support `statx`.
+ // We check for it on first failure and remember availability to avoid having to
+ // do it again.
+ #[repr(u8)]
+ enum STATX_STATE{ Unknown = 0, Present, Unavailable }
+ static STATX_SAVED_STATE: AtomicU8 = AtomicU8::new(STATX_STATE::Unknown as u8);

syscall! {
fn statx(
fd: c_int,
@@ -165,31 +166,44 @@ cfg_has_statx! {{
) -> c_int
}

- match STATX_STATE.load(Ordering::Relaxed) {
- 0 => {
- // It is a trick to call `statx` with null pointers to check if the syscall
- // is available. According to the manual, it is expected to fail with EFAULT.
- // We do this mainly for performance, since it is nearly hundreds times
- // faster than a normal successful call.
- let err = cvt(statx(0, ptr::null(), 0, libc::STATX_ALL, ptr::null_mut()))
- .err()
- .and_then(|e| e.raw_os_error());
- // We don't check `err == Some(libc::ENOSYS)` because the syscall may be limited
- // and returns `EPERM`. Listing all possible errors seems not a good idea.
- // See: https://github.com/rust-lang/rust/issues/65662
- if err != Some(libc::EFAULT) {
- STATX_STATE.store(1, Ordering::Relaxed);
- return None;
- }
- STATX_STATE.store(2, Ordering::Relaxed);
- }
- 1 => return None,
- _ => {}
+ if STATX_SAVED_STATE.load(Ordering::Relaxed) == STATX_STATE::Unavailable as u8 {
+ return None;
}

let mut buf: libc::statx = mem::zeroed();
if let Err(err) = cvt(statx(fd, path, flags, mask, &mut buf)) {
- return Some(Err(err));
+ if STATX_SAVED_STATE.load(Ordering::Relaxed) == STATX_STATE::Present as u8 {
+ return Some(Err(err));
+ }
+
+ // Availability not checked yet.
+ //
+ // First try the cheap way.
+ if err.raw_os_error() == Some(libc::ENOSYS) {
+ STATX_SAVED_STATE.store(STATX_STATE::Unavailable as u8, Ordering::Relaxed);
+ return None;
+ }
+
+ // Error other than `ENOSYS` is not a good enough indicator -- it is
+ // known that `EPERM` can be returned as a result of using seccomp to
+ // block the syscall.
+ // Availability is checked by performing a call which expects `EFAULT`
+ // if the syscall is usable.
+ // See: https://github.com/rust-lang/rust/issues/65662
+ // FIXME this can probably just do the call if `EPERM` was received, but
+ // previous iteration of the code checked it for all errors and for now
+ // this is retained.
+ // FIXME what about transient conditions like `ENOMEM`?
+ let err2 = cvt(statx(0, ptr::null(), 0, libc::STATX_ALL, ptr::null_mut()))
+ .err()
+ .and_then(|e| e.raw_os_error());
+ if err2 == Some(libc::EFAULT) {
+ STATX_SAVED_STATE.store(STATX_STATE::Present as u8, Ordering::Relaxed);
+ return Some(Err(err));
+ } else {
+ STATX_SAVED_STATE.store(STATX_STATE::Unavailable as u8, Ordering::Relaxed);
+ return None;
+ }
}

// We cannot fill `stat64` exhaustively because of private padding fields.
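The rewritten statx logic (#106661) no longer probes up front; it attempts the call, and only on failure decides (once) whether the syscall is unavailable, caching the verdict in an atomic. A simplified sketch of that shape, with stand-in functions instead of real syscalls and the EPERM/EFAULT re-probe collapsed into a single check:

    use std::sync::atomic::{AtomicU8, Ordering::Relaxed};

    const UNKNOWN: u8 = 0;
    const PRESENT: u8 = 1;
    const UNAVAILABLE: u8 = 2;

    static STATE: AtomicU8 = AtomicU8::new(UNKNOWN);

    // Stand-in for the fancy syscall; returns Err when the kernel lacks it.
    fn fancy_stat() -> Result<u64, &'static str> {
        Err("ENOSYS")
    }

    // Returns None to tell the caller "fall back to the ordinary path".
    fn stat_with_fallback() -> Option<Result<u64, &'static str>> {
        if STATE.load(Relaxed) == UNAVAILABLE {
            return None; // known-missing: skip the syscall entirely
        }
        match fancy_stat() {
            Ok(v) => Some(Ok(v)),
            Err(e) => {
                if STATE.load(Relaxed) == PRESENT {
                    return Some(Err(e)); // the syscall exists, so the error is real
                }
                // Availability unknown and the call failed: decide once and cache it.
                if e == "ENOSYS" {
                    STATE.store(UNAVAILABLE, Relaxed);
                    None
                } else {
                    STATE.store(PRESENT, Relaxed);
                    Some(Err(e))
                }
            }
        }
    }

    fn main() {
        assert!(stat_with_fallback().is_none()); // probes once, caches "unavailable"
        assert!(stat_with_fallback().is_none()); // hits the cache, no second probe
    }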
@@ -600,13 +614,13 @@ impl Iterator for ReadDir {
loop {
// As of POSIX.1-2017, readdir() is not required to be thread safe; only
// readdir_r() is. However, readdir_r() cannot correctly handle platforms
- // with unlimited or variable NAME_MAX. Many modern platforms guarantee
+ // with unlimited or variable NAME_MAX. Many modern platforms guarantee
// thread safety for readdir() as long an individual DIR* is not accessed
// concurrently, which is sufficient for Rust.
super::os::set_errno(0);
let entry_ptr = readdir64(self.inner.dirp.0);
if entry_ptr.is_null() {
- // We either encountered an error, or reached the end. Either way,
+ // We either encountered an error, or reached the end. Either way,
// the next call to next() should return None.
self.end_of_stream = true;
@@ -587,7 +587,7 @@ pub(super) fn copy_regular_files(reader: RawFd, writer: RawFd, max_len: u64) ->
// - copy_file_range file is immutable or syscall is blocked by seccomp¹ (EPERM)
// - copy_file_range cannot be used with pipes or device nodes (EINVAL)
// - the writer fd was opened with O_APPEND (EBADF²)
- // and no bytes were written successfully yet. (All these errnos should
+ // and no bytes were written successfully yet. (All these errnos should
// not be returned if something was already written, but they happen in
// the wild, see #91152.)
//
@@ -262,7 +262,7 @@ impl ExitStatus {
// available on Fuchsia.
//
// It does not appear that Fuchsia is Unix-like enough to implement ExitStatus (or indeed many
- // other things from std::os::unix) properly. This veneer is always going to be a bodge. So
+ // other things from std::os::unix) properly. This veneer is always going to be a bodge. So
// while I don't know if these implementations are actually correct, I think they will do for
// now at least.
pub fn core_dumped(&self) -> bool {
@@ -277,9 +277,9 @@ impl ExitStatus {

pub fn into_raw(&self) -> c_int {
// We don't know what someone who calls into_raw() will do with this value, but it should
- // have the conventional Unix representation. Despite the fact that this is not
+ // have the conventional Unix representation. Despite the fact that this is not
// standardised in SuS or POSIX, all Unix systems encode the signal and exit status the
- // same way. (Ie the WIFEXITED, WEXITSTATUS etc. macros have identical behaviour on every
+ // same way. (Ie the WIFEXITED, WEXITSTATUS etc. macros have identical behaviour on every
// Unix.)
//
// The caller of `std::os::unix::into_raw` is probably wanting a Unix exit status, and may
@@ -287,14 +287,14 @@ impl ExitStatus {
// different Unix variant.
//
// The other view would be to say that the caller on Fuchsia ought to know that `into_raw`
- // will give a raw Fuchsia status (whatever that is - I don't know, personally). That is
+ // will give a raw Fuchsia status (whatever that is - I don't know, personally). That is
// not possible here because we must return a c_int because that's what Unix (including
// SuS and POSIX) say a wait status is, but Fuchsia apparently uses a u64, so it won't
// necessarily fit.
//
// It seems to me that the right answer would be to provide std::os::fuchsia with its
// own ExitStatusExt, rather that trying to provide a not very convincing imitation of
- // Unix. Ie, std::os::unix::process:ExitStatusExt ought not to exist on Fuchsia. But
+ // Unix. Ie, std::os::unix::process:ExitStatusExt ought not to exist on Fuchsia. But
// fixing this up that is beyond the scope of my efforts now.
let exit_status_as_if_unix: u8 = self.0.try_into().expect("Fuchsia process return code bigger than 8 bits, but std::os::unix::ExitStatusExt::into_raw() was called to try to convert the value into a traditional Unix-style wait status, which cannot represent values greater than 255.");
let wait_status_as_if_unix = (exit_status_as_if_unix as c_int) << 8;
@@ -666,11 +666,11 @@ impl ExitStatus {
}

pub fn exit_ok(&self) -> Result<(), ExitStatusError> {
- // This assumes that WIFEXITED(status) && WEXITSTATUS==0 corresponds to status==0. This is
+ // This assumes that WIFEXITED(status) && WEXITSTATUS==0 corresponds to status==0. This is
// true on all actual versions of Unix, is widely assumed, and is specified in SuS
- // https://pubs.opengroup.org/onlinepubs/9699919799/functions/wait.html . If it is not
+ // https://pubs.opengroup.org/onlinepubs/9699919799/functions/wait.html. If it is not
// true for a platform pretending to be Unix, the tests (our doctests, and also
- // procsss_unix/tests.rs) will spot it. `ExitStatusError::code` assumes this too.
+ // procsss_unix/tests.rs) will spot it. `ExitStatusError::code` assumes this too.
match NonZero_c_int::try_from(self.0) {
/* was nonzero */ Ok(failure) => Err(ExitStatusError(failure)),
/* was zero, couldn't convert */ Err(_) => Ok(()),
@@ -19,17 +19,17 @@ fn exitstatus_display_tests() {
t(0x00000, "exit status: 0");
t(0x0ff00, "exit status: 255");

- // On MacOS, 0x0137f is WIFCONTINUED, not WIFSTOPPED. Probably *BSD is similar.
+ // On MacOS, 0x0137f is WIFCONTINUED, not WIFSTOPPED. Probably *BSD is similar.
// https://github.com/rust-lang/rust/pull/82749#issuecomment-790525956
// The purpose of this test is to test our string formatting, not our understanding of the wait
- // status magic numbers. So restrict these to Linux.
+ // status magic numbers. So restrict these to Linux.
if cfg!(target_os = "linux") {
t(0x0137f, "stopped (not terminated) by signal: 19 (SIGSTOP)");
t(0x0ffff, "continued (WIFCONTINUED)");
}

// Testing "unrecognised wait status" is hard because the wait.h macros typically
- // assume that the value came from wait and isn't mad. With the glibc I have here
+ // assume that the value came from wait and isn't mad. With the glibc I have here
// this works:
if cfg!(all(target_os = "linux", target_env = "gnu")) {
t(0x000ff, "unrecognised wait status: 255 0xff");
@@ -195,11 +195,11 @@ impl ExitStatus {
}

pub fn exit_ok(&self) -> Result<(), ExitStatusError> {
- // This assumes that WIFEXITED(status) && WEXITSTATUS==0 corresponds to status==0. This is
+ // This assumes that WIFEXITED(status) && WEXITSTATUS==0 corresponds to status==0. This is
// true on all actual versions of Unix, is widely assumed, and is specified in SuS
- // https://pubs.opengroup.org/onlinepubs/9699919799/functions/wait.html . If it is not
+ // https://pubs.opengroup.org/onlinepubs/9699919799/functions/wait.html. If it is not
// true for a platform pretending to be Unix, the tests (our doctests, and also
- // procsss_unix/tests.rs) will spot it. `ExitStatusError::code` assumes this too.
+ // procsss_unix/tests.rs) will spot it. `ExitStatusError::code` assumes this too.
match NonZero_c_int::try_from(self.0) {
Ok(failure) => Err(ExitStatusError(failure)),
Err(_) => Ok(()),
@@ -73,7 +73,7 @@ impl Thread {
n => {
assert_eq!(n, libc::EINVAL);
// EINVAL means |stack_size| is either too small or not a
- // multiple of the system page size. Because it's definitely
+ // multiple of the system page size. Because it's definitely
// >= PTHREAD_STACK_MIN, it must be an alignment issue.
// Round up to the nearest page and try again.
let page_size = os::page_size();
@@ -755,10 +755,10 @@ pub mod guard {
if cfg!(all(target_os = "linux", not(target_env = "musl"))) {
// Linux doesn't allocate the whole stack right away, and
// the kernel has its own stack-guard mechanism to fault
- // when growing too close to an existing mapping. If we map
+ // when growing too close to an existing mapping. If we map
// our own guard, then the kernel starts enforcing a rather
// large gap above that, rendering much of the possible
- // stack space useless. See #43052.
+ // stack space useless. See #43052.
//
// Instead, we'll just note where we expect rlimit to start
// faulting, so our handler can report "stack overflow", and
@@ -774,14 +774,14 @@ pub mod guard {
None
} else if cfg!(target_os = "freebsd") {
// FreeBSD's stack autogrows, and optionally includes a guard page
- // at the bottom. If we try to remap the bottom of the stack
- // ourselves, FreeBSD's guard page moves upwards. So we'll just use
+ // at the bottom. If we try to remap the bottom of the stack
+ // ourselves, FreeBSD's guard page moves upwards. So we'll just use
// the builtin guard page.
let stackptr = get_stack_start_aligned()?;
let guardaddr = stackptr.addr();
// Technically the number of guard pages is tunable and controlled
// by the security.bsd.stack_guard_page sysctl, but there are
- // few reasons to change it from the default. The default value has
+ // few reasons to change it from the default. The default value has
// been 1 ever since FreeBSD 11.1 and 10.4.
const GUARD_PAGES: usize = 1;
let guard = guardaddr..guardaddr + GUARD_PAGES * page_size;
@@ -877,9 +877,9 @@ pub mod guard {
} else if cfg!(all(target_os = "linux", any(target_env = "gnu", target_env = "uclibc")))
{
// glibc used to include the guard area within the stack, as noted in the BUGS
- // section of `man pthread_attr_getguardsize`. This has been corrected starting
+ // section of `man pthread_attr_getguardsize`. This has been corrected starting
// with glibc 2.27, and in some distro backports, so the guard is now placed at the
- // end (below) the stack. There's no easy way for us to know which we have at
+ // end (below) the stack. There's no easy way for us to know which we have at
// runtime, so we'll just match any fault in the range right above or below the
// stack base to call that fault a stack overflow.
Some(stackaddr - guardsize..stackaddr + guardsize)
@@ -157,7 +157,7 @@ impl<'a> Iterator for SplitPaths<'a> {
// Double quotes are used as a way of introducing literal semicolons
// (since c:\some;dir is a valid Windows path). Double quotes are not
// themselves permitted in path names, so there is no way to escape a
- // double quote. Quoted regions can appear in arbitrary locations, so
+ // double quote. Quoted regions can appear in arbitrary locations, so
//
// c:\foo;c:\som"e;di"r;c:\bar
//
@@ -26,7 +26,7 @@ impl Thread {

// FIXME On UNIX, we guard against stack sizes that are too small but
// that's because pthreads enforces that stacks are at least
- // PTHREAD_STACK_MIN bytes big. Windows has no such lower limit, it's
+ // PTHREAD_STACK_MIN bytes big. Windows has no such lower limit, it's
// just that below a certain threshold you can't do anything useful.
// That threshold is application and architecture-specific, however.
let ret = c::CreateThread(
@@ -116,7 +116,7 @@ pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>, options: Option<Opt
} else {
if !opts.nocapture {
// If we encounter a non-unwinding panic, flush any captured output from the current test,
- // and stop capturing output to ensure that the non-unwinding panic message is visible.
+ // and stop capturing output to ensure that the non-unwinding panic message is visible.
// We also acquire the locks for both output streams to prevent output from other threads
// from interleaving with the panic message or appearing after it.
let builtin_panic_hook = panic::take_hook();
@@ -30,7 +30,7 @@ pub(crate) fn get_dbpath_for_term(term: &str) -> Option<PathBuf> {
}
} else {
// Found nothing in TERMINFO_DIRS, use the default paths:
- // According to /etc/terminfo/README, after looking at
+ // According to /etc/terminfo/README, after looking at
// ~/.terminfo, ncurses will search /etc/terminfo, then
// /lib/terminfo, and eventually /usr/share/terminfo.
// On Haiku the database can be found at /boot/system/data/terminfo
@@ -242,7 +242,7 @@ impl<'a, 'tcx> DocFolder for CacheBuilder<'a, 'tcx> {
}

// Index this method for searching later on.
- if let Some(ref s) = item.name.or_else(|| {
+ if let Some(s) = item.name.or_else(|| {
if item.is_stripped() {
None
} else if let clean::ImportItem(ref i) = *item.kind &&
@@ -317,14 +317,15 @@ impl<'a, 'tcx> DocFolder for CacheBuilder<'a, 'tcx> {
short_markdown_summary(x.as_str(), &item.link_names(self.cache))
});
let ty = item.type_();
- let name = s.to_string();
- if ty != ItemType::StructField || u16::from_str_radix(&name, 10).is_err() {
+ if ty != ItemType::StructField
+ || u16::from_str_radix(s.as_str(), 10).is_err()
+ {
// In case this is a field from a tuple struct, we don't add it into
// the search index because its name is something like "0", which is
// not useful for rustdoc search.
self.cache.search_index.push(IndexItem {
ty,
- name,
+ name: s,
path: join_with_double_colon(path),
desc,
parent,
@@ -569,7 +569,7 @@ fn generate_macro_def_id_path(
root_path: Option<&str>,
) -> Result<(String, ItemType, Vec<Symbol>), HrefError> {
let tcx = cx.shared.tcx;
- let crate_name = tcx.crate_name(def_id.krate).to_string();
+ let crate_name = tcx.crate_name(def_id.krate);
let cache = cx.cache();

let fqp: Vec<Symbol> = tcx
@@ -584,7 +584,7 @@ fn generate_macro_def_id_path(
}
})
.collect();
- let mut relative = fqp.iter().map(|elem| elem.to_string());
+ let mut relative = fqp.iter().copied();
let cstore = CStore::from_tcx(tcx);
// We need this to prevent a `panic` when this function is used from intra doc links...
if !cstore.has_crate_data(def_id.krate) {
@@ -602,9 +602,9 @@ fn generate_macro_def_id_path(
};

let mut path = if is_macro_2 {
- once(crate_name.clone()).chain(relative).collect()
+ once(crate_name).chain(relative).collect()
} else {
- vec![crate_name.clone(), relative.next_back().unwrap()]
+ vec![crate_name, relative.next_back().unwrap()]
};
if path.len() < 2 {
// The minimum we can have is the crate name followed by the macro name. If shorter, then
@@ -614,17 +614,22 @@ fn generate_macro_def_id_path(
}

if let Some(last) = path.last_mut() {
- *last = format!("macro.{}.html", last);
+ *last = Symbol::intern(&format!("macro.{}.html", last.as_str()));
}

let url = match cache.extern_locations[&def_id.krate] {
ExternalLocation::Remote(ref s) => {
// `ExternalLocation::Remote` always end with a `/`.
- format!("{}{}", s, path.join("/"))
+ format!("{}{}", s, path.iter().map(|p| p.as_str()).join("/"))
}
ExternalLocation::Local => {
// `root_path` always end with a `/`.
- format!("{}{}/{}", root_path.unwrap_or(""), crate_name, path.join("/"))
+ format!(
+ "{}{}/{}",
+ root_path.unwrap_or(""),
+ crate_name,
+ path.iter().map(|p| p.as_str()).join("/")
+ )
}
ExternalLocation::Unknown => {
debug!("crate {} not in cache when linkifying macros", crate_name);
@@ -1050,7 +1055,7 @@ fn fmt_type<'cx>(
_ => String::new(),
};
let m = mutability.print_with_space();
- let amp = if f.alternate() { "&".to_string() } else { "&amp;".to_string() };
+ let amp = if f.alternate() { "&" } else { "&amp;" };
match **ty {
clean::DynTrait(ref bounds, ref trait_lt)
if bounds.len() > 1 || trait_lt.is_some() =>
@@ -30,7 +30,7 @@ use rustc_hir::def_id::DefId;
use rustc_hir::HirId;
use rustc_middle::ty::TyCtxt;
use rustc_span::edition::Edition;
- use rustc_span::Span;
+ use rustc_span::{Span, Symbol};

use once_cell::sync::Lazy;
use std::borrow::Cow;
@@ -198,7 +198,7 @@ fn slugify(c: char) -> Option<char> {

#[derive(Clone, Debug)]
pub struct Playground {
- pub crate_name: Option<String>,
+ pub crate_name: Option<Symbol>,
pub url: String,
}
@@ -290,7 +290,7 @@ impl<'a, I: Iterator<Item = Event<'a>>> Iterator for CodeBlocks<'_, 'a, I> {
.map(|l| map_line(l).for_code())
.intersperse("\n".into())
.collect::<String>();
- let krate = krate.as_ref().map(|s| &**s);
+ let krate = krate.as_ref().map(|s| s.as_str());
let (test, _, _) =
doctest::make_test(&test, krate, false, &Default::default(), edition, None);
let channel = if test.contains("#![feature(") { "&version=nightly" } else { "" };
@@ -464,8 +464,7 @@ impl<'tcx> FormatRenderer<'tcx> for Context<'tcx> {
// If user passed in `--playground-url` arg, we fill in crate name here
let mut playground = None;
if let Some(url) = playground_url {
- playground =
- Some(markdown::Playground { crate_name: Some(krate.name(tcx).to_string()), url });
+ playground = Some(markdown::Playground { crate_name: Some(krate.name(tcx)), url });
}
let mut layout = layout::Layout {
logo: String::new(),
@@ -491,7 +490,7 @@ impl<'tcx> FormatRenderer<'tcx> for Context<'tcx> {
}
(sym::html_playground_url, Some(s)) => {
playground = Some(markdown::Playground {
- crate_name: Some(krate.name(tcx).to_string()),
+ crate_name: Some(krate.name(tcx)),
url: s.to_string(),
});
}
@@ -100,7 +100,7 @@ pub(crate) fn ensure_trailing_slash(v: &str) -> impl fmt::Display + '_ {
#[derive(Debug)]
pub(crate) struct IndexItem {
pub(crate) ty: ItemType,
- pub(crate) name: String,
+ pub(crate) name: Symbol,
pub(crate) path: String,
pub(crate) desc: String,
pub(crate) parent: Option<DefId>,
@@ -1343,7 +1343,7 @@ fn notable_traits_decl(ty: &clean::Type, cx: &Context<'_>) -> (String, String) {
write!(
&mut out,
"<h3>Notable traits for <code>{}</code></h3>\
- <pre class=\"content\"><code>",
+ <pre><code>",
impl_.for_.print(cx)
);
}
@@ -2769,8 +2769,8 @@ fn collect_paths_for_type(first_ty: clean::Type, cache: &Cache) -> Vec<String> {
let mut work = VecDeque::new();

let mut process_path = |did: DefId| {
- let get_extern = || cache.external_paths.get(&did).map(|s| s.0.clone());
- let fqp = cache.exact_paths.get(&did).cloned().or_else(get_extern);
+ let get_extern = || cache.external_paths.get(&did).map(|s| &s.0);
+ let fqp = cache.exact_paths.get(&did).or_else(get_extern);

if let Some(path) = fqp {
out.push(join_with_double_colon(&path));
@@ -1027,8 +1027,8 @@ fn item_trait(w: &mut Buffer, cx: &mut Context<'_>, it: &clean::Item, t: &clean:
.chain(std::iter::once("implementors"))
.collect();
if let Some(did) = it.item_id.as_def_id() &&
- let get_extern = { || cache.external_paths.get(&did).map(|s| s.0.clone()) } &&
- let Some(fqp) = cache.exact_paths.get(&did).cloned().or_else(get_extern) {
+ let get_extern = { || cache.external_paths.get(&did).map(|s| &s.0) } &&
+ let Some(fqp) = cache.exact_paths.get(&did).or_else(get_extern) {
js_src_path.extend(fqp[..fqp.len() - 1].iter().copied());
js_src_path.push_fmt(format_args!("{}.{}.js", it.type_(), fqp.last().unwrap()));
} else {
@@ -35,7 +35,7 @@ pub(crate) fn build_index<'tcx>(
.map_or_else(String::new, |s| short_markdown_summary(&s, &item.link_names(cache)));
cache.search_index.push(IndexItem {
ty: item.type_(),
- name: item.name.unwrap().to_string(),
+ name: item.name.unwrap(),
path: join_with_double_colon(&fqp[..fqp.len() - 1]),
desc,
parent: Some(parent),
@@ -58,8 +58,8 @@ pub(crate) fn build_index<'tcx>(
// Sort search index items. This improves the compressibility of the search index.
cache.search_index.sort_unstable_by(|k1, k2| {
// `sort_unstable_by_key` produces lifetime errors
- let k1 = (&k1.path, &k1.name, &k1.ty, &k1.parent);
- let k2 = (&k2.path, &k2.name, &k2.ty, &k2.parent);
+ let k1 = (&k1.path, k1.name.as_str(), &k1.ty, &k1.parent);
+ let k2 = (&k2.path, k2.name.as_str(), &k2.ty, &k2.parent);
std::cmp::Ord::cmp(&k1, &k2)
});
@@ -240,7 +240,7 @@ pub(crate) fn build_index<'tcx>(
)?;
crate_data.serialize_field(
"n",
- &self.items.iter().map(|item| &item.name).collect::<Vec<_>>(),
+ &self.items.iter().map(|item| item.name.as_str()).collect::<Vec<_>>(),
)?;
crate_data.serialize_field(
"q",
@@ -299,7 +299,7 @@ pub(crate) fn build_index<'tcx>(
)?;
crate_data.serialize_field(
"p",
- &self.paths.iter().map(|(it, s)| (it, s.to_string())).collect::<Vec<_>>(),
+ &self.paths.iter().map(|(it, s)| (it, s.as_str())).collect::<Vec<_>>(),
)?;
if has_aliases {
crate_data.serialize_field("a", &self.aliases)?;
@@ -1214,11 +1214,11 @@ a.test-arrow:hover {
content: "\00a0";
}

- .notable .docblock {
+ .notable .content {
margin: 0.25em 0.5em;
}

- .notable .docblock pre, .notable .docblock code {
+ .notable .content pre, .notable .content code {
background: transparent;
margin: 0;
padding: 0;
@@ -1226,6 +1226,10 @@ a.test-arrow:hover {
white-space: pre-wrap;
}

+ .notable .content > h3:first-child {
+ margin: 0 0 5px 0;
+ }
+
.search-failed {
text-align: center;
margin-top: 20px;
@@ -847,7 +847,7 @@ function loadCss(cssUrl) {
window.hideAllModals(false);
const ty = e.getAttribute("data-ty");
const wrapper = document.createElement("div");
- wrapper.innerHTML = "<div class=\"docblock\">" + window.NOTABLE_TRAITS[ty] + "</div>";
+ wrapper.innerHTML = "<div class=\"content\">" + window.NOTABLE_TRAITS[ty] + "</div>";
wrapper.className = "notable popover";
const focusCatcher = document.createElement("div");
focusCatcher.setAttribute("tabindex", "0");
@@ -22,7 +22,8 @@ fn main() {
}

// test `stat`
- assert_eq!(fs::metadata("foo.txt").unwrap_err().kind(), ErrorKind::PermissionDenied);
+ let err = fs::metadata("foo.txt").unwrap_err();
+ assert_eq!(err.kind(), ErrorKind::PermissionDenied);
// check that it is the right kind of `PermissionDenied`
- assert_eq!(Error::last_os_error().raw_os_error(), Some(libc::EACCES));
+ assert_eq!(err.raw_os_error(), Some(libc::EACCES));
}
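The test fix above inspects the error value that was actually returned instead of re-reading `Error::last_os_error()`, which intervening calls can overwrite. A small stable-Rust illustration of the same habit (the path is made up; any missing file works):

    use std::fs;
    use std::io::ErrorKind;

    fn main() {
        // Inspect the captured error, not the thread-local errno, which other
        // calls (including the assertion machinery) may have overwritten.
        let err = fs::metadata("/definitely/missing/path").unwrap_err();
        assert_eq!(err.kind(), ErrorKind::NotFound);
        println!("raw OS error: {:?}", err.raw_os_error());
    }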
@@ -1 +1 @@
<script type="text/json" id="notable-traits-data">{"&'static [SomeStruct]":"<h3>Notable traits for <code>&amp;[<a class=\"struct\" href=\"struct.SomeStruct.html\" title=\"struct doc_notable_trait_slice::SomeStruct\">SomeStruct</a>]</code></h3><pre class=\"content\"><code><span class=\"where fmt-newline\">impl <a class=\"trait\" href=\"trait.SomeTrait.html\" title=\"trait doc_notable_trait_slice::SomeTrait\">SomeTrait</a> for &amp;[<a class=\"struct\" href=\"struct.SomeStruct.html\" title=\"struct doc_notable_trait_slice::SomeStruct\">SomeStruct</a>]</span>"}</script>
<script type="text/json" id="notable-traits-data">{"&'static [SomeStruct]":"<h3>Notable traits for <code>&amp;[<a class=\"struct\" href=\"struct.SomeStruct.html\" title=\"struct doc_notable_trait_slice::SomeStruct\">SomeStruct</a>]</code></h3><pre><code><span class=\"where fmt-newline\">impl <a class=\"trait\" href=\"trait.SomeTrait.html\" title=\"trait doc_notable_trait_slice::SomeTrait\">SomeTrait</a> for &amp;[<a class=\"struct\" href=\"struct.SomeStruct.html\" title=\"struct doc_notable_trait_slice::SomeStruct\">SomeStruct</a>]</span>"}</script>
@@ -1 +1 @@
<script type="text/json" id="notable-traits-data">{"SomeStruct":"<h3>Notable traits for <code><a class=\"struct\" href=\"struct.SomeStruct.html\" title=\"struct doc_notable_trait::SomeStruct\">SomeStruct</a></code></h3><pre class=\"content\"><code><span class=\"where fmt-newline\">impl <a class=\"trait\" href=\"trait.SomeTrait.html\" title=\"trait doc_notable_trait::SomeTrait\">SomeTrait</a> for <a class=\"struct\" href=\"struct.SomeStruct.html\" title=\"struct doc_notable_trait::SomeStruct\">SomeStruct</a></span>"}</script>
<script type="text/json" id="notable-traits-data">{"SomeStruct":"<h3>Notable traits for <code><a class=\"struct\" href=\"struct.SomeStruct.html\" title=\"struct doc_notable_trait::SomeStruct\">SomeStruct</a></code></h3><pre><code><span class=\"where fmt-newline\">impl <a class=\"trait\" href=\"trait.SomeTrait.html\" title=\"trait doc_notable_trait::SomeTrait\">SomeTrait</a> for <a class=\"struct\" href=\"struct.SomeStruct.html\" title=\"struct doc_notable_trait::SomeStruct\">SomeStruct</a></span>"}</script>
@@ -1 +1 @@
<script type="text/json" id="notable-traits-data">{"SomeStruct":"<h3>Notable traits for <code><a class=\"struct\" href=\"struct.SomeStruct.html\" title=\"struct doc_notable_trait::SomeStruct\">SomeStruct</a></code></h3><pre class=\"content\"><code><span class=\"where fmt-newline\">impl <a class=\"trait\" href=\"trait.SomeTrait.html\" title=\"trait doc_notable_trait::SomeTrait\">SomeTrait</a> for <a class=\"struct\" href=\"struct.SomeStruct.html\" title=\"struct doc_notable_trait::SomeStruct\">SomeStruct</a></span>","Wrapper<Self>":"<h3>Notable traits for <code><a class=\"struct\" href=\"struct.Wrapper.html\" title=\"struct doc_notable_trait::Wrapper\">Wrapper</a>&lt;T&gt;</code></h3><pre class=\"content\"><code><span class=\"where fmt-newline\">impl&lt;T:&nbsp;<a class=\"trait\" href=\"trait.SomeTrait.html\" title=\"trait doc_notable_trait::SomeTrait\">SomeTrait</a>&gt; <a class=\"trait\" href=\"trait.SomeTrait.html\" title=\"trait doc_notable_trait::SomeTrait\">SomeTrait</a> for <a class=\"struct\" href=\"struct.Wrapper.html\" title=\"struct doc_notable_trait::Wrapper\">Wrapper</a>&lt;T&gt;</span>"}</script>
<script type="text/json" id="notable-traits-data">{"SomeStruct":"<h3>Notable traits for <code><a class=\"struct\" href=\"struct.SomeStruct.html\" title=\"struct doc_notable_trait::SomeStruct\">SomeStruct</a></code></h3><pre><code><span class=\"where fmt-newline\">impl <a class=\"trait\" href=\"trait.SomeTrait.html\" title=\"trait doc_notable_trait::SomeTrait\">SomeTrait</a> for <a class=\"struct\" href=\"struct.SomeStruct.html\" title=\"struct doc_notable_trait::SomeStruct\">SomeStruct</a></span>","Wrapper<Self>":"<h3>Notable traits for <code><a class=\"struct\" href=\"struct.Wrapper.html\" title=\"struct doc_notable_trait::Wrapper\">Wrapper</a>&lt;T&gt;</code></h3><pre><code><span class=\"where fmt-newline\">impl&lt;T:&nbsp;<a class=\"trait\" href=\"trait.SomeTrait.html\" title=\"trait doc_notable_trait::SomeTrait\">SomeTrait</a>&gt; <a class=\"trait\" href=\"trait.SomeTrait.html\" title=\"trait doc_notable_trait::SomeTrait\">SomeTrait</a> for <a class=\"struct\" href=\"struct.Wrapper.html\" title=\"struct doc_notable_trait::Wrapper\">Wrapper</a>&lt;T&gt;</span>"}</script>
@@ -1 +1 @@
<script type="text/json" id="notable-traits-data">{"Wrapper<Self>":"<h3>Notable traits for <code><a class=\"struct\" href=\"struct.Wrapper.html\" title=\"struct doc_notable_trait::Wrapper\">Wrapper</a>&lt;T&gt;</code></h3><pre class=\"content\"><code><span class=\"where fmt-newline\">impl&lt;T:&nbsp;<a class=\"trait\" href=\"trait.SomeTrait.html\" title=\"trait doc_notable_trait::SomeTrait\">SomeTrait</a>&gt; <a class=\"trait\" href=\"trait.SomeTrait.html\" title=\"trait doc_notable_trait::SomeTrait\">SomeTrait</a> for <a class=\"struct\" href=\"struct.Wrapper.html\" title=\"struct doc_notable_trait::Wrapper\">Wrapper</a>&lt;T&gt;</span>"}</script>
<script type="text/json" id="notable-traits-data">{"Wrapper<Self>":"<h3>Notable traits for <code><a class=\"struct\" href=\"struct.Wrapper.html\" title=\"struct doc_notable_trait::Wrapper\">Wrapper</a>&lt;T&gt;</code></h3><pre><code><span class=\"where fmt-newline\">impl&lt;T:&nbsp;<a class=\"trait\" href=\"trait.SomeTrait.html\" title=\"trait doc_notable_trait::SomeTrait\">SomeTrait</a>&gt; <a class=\"trait\" href=\"trait.SomeTrait.html\" title=\"trait doc_notable_trait::SomeTrait\">SomeTrait</a> for <a class=\"struct\" href=\"struct.Wrapper.html\" title=\"struct doc_notable_trait::Wrapper\">Wrapper</a>&lt;T&gt;</span>"}</script>
@@ -1 +1 @@
<script type="text/json" id="notable-traits-data">{"Odd":"<h3>Notable traits for <code><a class=\"struct\" href=\"struct.Odd.html\" title=\"struct foo::Odd\">Odd</a></code></h3><pre class=\"content\"><code><span class=\"where fmt-newline\">impl <a class=\"trait\" href=\"{{channel}}/core/iter/traits/iterator/trait.Iterator.html\" title=\"trait core::iter::traits::iterator::Iterator\">Iterator</a> for <a class=\"struct\" href=\"struct.Odd.html\" title=\"struct foo::Odd\">Odd</a></span><span class=\"where fmt-newline\"> type <a href=\"{{channel}}/core/iter/traits/iterator/trait.Iterator.html#associatedtype.Item\" class=\"associatedtype\">Item</a> = <a class=\"primitive\" href=\"{{channel}}/std/primitive.usize.html\">usize</a>;</span>"}</script>
<script type="text/json" id="notable-traits-data">{"Odd":"<h3>Notable traits for <code><a class=\"struct\" href=\"struct.Odd.html\" title=\"struct foo::Odd\">Odd</a></code></h3><pre><code><span class=\"where fmt-newline\">impl <a class=\"trait\" href=\"{{channel}}/core/iter/traits/iterator/trait.Iterator.html\" title=\"trait core::iter::traits::iterator::Iterator\">Iterator</a> for <a class=\"struct\" href=\"struct.Odd.html\" title=\"struct foo::Odd\">Odd</a></span><span class=\"where fmt-newline\"> type <a href=\"{{channel}}/core/iter/traits/iterator/trait.Iterator.html#associatedtype.Item\" class=\"associatedtype\">Item</a> = <a class=\"primitive\" href=\"{{channel}}/std/primitive.usize.html\">usize</a>;</span>"}</script>
@@ -2,10 +2,17 @@
// run-rustfix
#![warn(unused_braces)]

+ macro_rules! make_1 {
+ () => {
+ 1
+ }
+ }
+
struct A<const N: usize>;

fn main() {
let _: A<7>; // ok
let _: A<7>; //~ WARN unnecessary braces
let _: A<{ 3 + 5 }>; // ok
+ let _: A<{make_1!()}>; // ok
}
@@ -2,10 +2,17 @@
// run-rustfix
#![warn(unused_braces)]

+ macro_rules! make_1 {
+ () => {
+ 1
+ }
+ }
+
struct A<const N: usize>;

fn main() {
let _: A<7>; // ok
let _: A<{ 7 }>; //~ WARN unnecessary braces
let _: A<{ 3 + 5 }>; // ok
+ let _: A<{make_1!()}>; // ok
}
@@ -1,5 +1,5 @@
warning: unnecessary braces around const expression
- --> $DIR/unused_braces.rs:9:14
+ --> $DIR/unused_braces.rs:15:14
|
LL | let _: A<{ 7 }>;
| ^^ ^^
@@ -1,8 +1,6 @@
error[E0282]: type annotations needed
--> $DIR/cannot-infer-partial-try-return.rs:20:9
|
- LL | infallible()?;
- | ------------- type must be known at this point
LL | Ok(())
| ^^ cannot infer type of the type parameter `E` declared on the enum `Result`
|
@@ -1,8 +1,13 @@
error[E0282]: type annotations needed
- --> $DIR/question-mark-type-infer.rs:10:30
+ --> $DIR/question-mark-type-infer.rs:10:21
|
LL | l.iter().map(f).collect()?
- | ^ cannot infer type
+ | ^^^^^^^ cannot infer type of the type parameter `B` declared on the associated function `collect`
|
+ help: consider specifying the generic argument
+ |
+ LL | l.iter().map(f).collect::<Vec<_>>()?
+ | ++++++++++
+
error: aborting due to previous error

@@ -1,14 +1,16 @@
- error[E0282]: type annotations needed
- --> $DIR/issue-69455.rs:29:20
+ error[E0284]: type annotations needed
+ --> $DIR/issue-69455.rs:29:41
|
LL | println!("{}", 23u64.test(xs.iter().sum()));
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ cannot infer type of the type parameter `T` declared on the associated function `new_display`
+ | ---- ^^^ cannot infer type of the type parameter `S` declared on the associated function `sum`
+ | |
+ | type must be known at this point
|
- = note: this error originates in the macro `$crate::format_args_nl` which comes from the expansion of the macro `println` (in Nightly builds, run with -Z macro-backtrace for more info)
+ = note: cannot satisfy `<u64 as Test<_>>::Output == _`
help: consider specifying the generic argument
|
- LL | println!("{}", 23u64.test(xs.iter().sum())::<T>);
- | +++++
+ LL | println!("{}", 23u64.test(xs.iter().sum::<S>()));
+ | +++++

error[E0283]: type annotations needed
--> $DIR/issue-69455.rs:29:41
@@ -33,5 +35,5 @@ LL | println!("{}", 23u64.test(xs.iter().sum::<S>()));

error: aborting due to 2 previous errors

- Some errors have detailed explanations: E0282, E0283.
- For more information about an error, try `rustc --explain E0282`.
+ Some errors have detailed explanations: E0283, E0284.
+ For more information about an error, try `rustc --explain E0283`.
@@ -2,7 +2,7 @@ error[E0282]: type annotations needed for `(Vec<T>,)`
--> $DIR/cannot_infer_local_or_vec_in_tuples.rs:2:9
|
LL | let (x, ) = (vec![], );
- | ^^^^^
+ | ^^^^^ ---------- type must be known at this point
|
help: consider giving this pattern a type, where the type for type parameter `T` is specified
|