Mirror of https://github.com/rust-lang/rust-analyzer
Synced 2024-12-29 06:23:25 +00:00
Auto merge of #15353 - HKalbasi:mir, r=HKalbasi
Add manual implementation of clone for tuples in mir interpreter

And some other minor changes. Clone for tuples is not implemented in std; it is magically implemented by the compiler, so the interpreter needs its own implementation.
Commit f442c4aad6
6 changed files with 162 additions and 15 deletions
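As the description notes, `Clone` for tuples is provided by the compiler rather than spelled out in library sources, so a MIR interpreter that only sees source-level impls has to special-case it. A minimal sketch (not part of the change) of the kind of call that is now handled, modeled on the updated builtin_derive_macro test that follows:

    #[derive(Clone)]
    struct Y {
        field1: i32,
        field2: ((i32, u8), i64),
    }

    fn main() {
        let y = Y { field1: 4, field2: ((32, 5), 12) };
        // The derived `Y::clone` calls `Clone::clone` on the nested tuple,
        // for which no source-level impl exists; this is the call the
        // evaluator now implements manually.
        let y2 = y.clone();
        assert_eq!(y2.field2.0 .1, 5);
    }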
@@ -1428,14 +1428,14 @@ fn builtin_derive_macro() {
     #[derive(Clone)]
     struct Y {
         field1: i32,
-        field2: u8,
+        field2: ((i32, u8), i64),
     }

     const GOAL: u8 = {
-        let x = X(2, Z::Foo(Y { field1: 4, field2: 5 }), 8);
+        let x = X(2, Z::Foo(Y { field1: 4, field2: ((32, 5), 12) }), 8);
         let x = x.clone();
         let Z::Foo(t) = x.1;
-        t.field2
+        t.field2.0 .1
     };
     "#,
     5,
@@ -1632,6 +1632,34 @@ const GOAL: i32 = {
     );
 }

+#[test]
+fn closure_capture_unsized_type() {
+    check_number(
+        r#"
+        //- minicore: fn, copy, slice, index, coerce_unsized
+        fn f<T: A>(x: &<T as A>::Ty) -> &<T as A>::Ty {
+            let c = || &*x;
+            c()
+        }
+
+        trait A {
+            type Ty;
+        }
+
+        impl A for i32 {
+            type Ty = [u8];
+        }
+
+        const GOAL: u8 = {
+            let k: &[u8] = &[1, 2, 3];
+            let k = f::<i32>(k);
+            k[0] + k[1] + k[2]
+        }
+        "#,
+        6,
+    );
+}
+
 #[test]
 fn closure_and_impl_fn() {
     check_number(
@@ -2521,12 +2549,16 @@ fn const_trait_assoc() {
     );
     check_number(
         r#"
-    //- minicore: size_of
+    //- minicore: size_of, fn
     //- /a/lib.rs crate:a
     use core::mem::size_of;
     pub struct S<T>(T);
     impl<T> S<T> {
-        pub const X: usize = core::mem::size_of::<T>();
+        pub const X: usize = {
+            let k: T;
+            let f = || core::mem::size_of::<T>();
+            f()
+        };
     }
     //- /main.rs crate:main deps:a
     use a::{S};
@@ -438,6 +438,8 @@ fn atomic() {
         pub fn atomic_nand_seqcst<T: Copy>(dst: *mut T, src: T) -> T;
         pub fn atomic_or_release<T: Copy>(dst: *mut T, src: T) -> T;
         pub fn atomic_xor_seqcst<T: Copy>(dst: *mut T, src: T) -> T;
+        pub fn atomic_fence_seqcst();
+        pub fn atomic_singlethreadfence_acqrel();
     }

     fn should_not_reach() {
@@ -452,6 +454,7 @@ fn atomic() {
     if (30, true) != atomic_cxchg_release_seqcst(&mut y, 30, 40) {
         should_not_reach();
     }
+    atomic_fence_seqcst();
     if (40, false) != atomic_cxchg_release_seqcst(&mut y, 30, 50) {
         should_not_reach();
     }
@@ -459,6 +462,7 @@ fn atomic() {
         should_not_reach();
     }
     let mut z = atomic_xsub_seqcst(&mut x, -200);
+    atomic_singlethreadfence_acqrel();
     atomic_xor_seqcst(&mut x, 1024);
     atomic_load_seqcst(&x) + z * 3 + atomic_load_seqcst(&y) * 2
 };
@@ -14,7 +14,7 @@ use triomphe::Arc;

 use crate::{
     consteval::try_const_usize, db::HirDatabase, infer::normalize, layout::adt::struct_variant_idx,
-    utils::ClosureSubst, Interner, Substitution, TraitEnvironment, Ty,
+    utils::ClosureSubst, Interner, ProjectionTy, Substitution, TraitEnvironment, Ty,
 };

 pub use self::{
@@ -279,7 +279,15 @@ pub fn layout_of_ty_query(
             //     return Ok(tcx.mk_layout(LayoutS::scalar(cx, data_ptr)));
             // }

-            let unsized_part = struct_tail_erasing_lifetimes(db, pointee.clone());
+            let mut unsized_part = struct_tail_erasing_lifetimes(db, pointee.clone());
+            if let TyKind::AssociatedType(id, subst) = unsized_part.kind(Interner) {
+                unsized_part = TyKind::Alias(chalk_ir::AliasTy::Projection(ProjectionTy {
+                    associated_ty_id: *id,
+                    substitution: subst.clone(),
+                }))
+                .intern(Interner);
+            }
+            unsized_part = normalize(db, trait_env.clone(), unsized_part);
             let metadata = match unsized_part.kind(Interner) {
                 TyKind::Slice(_) | TyKind::Str => {
                     scalar_unit(dl, Primitive::Int(dl.ptr_sized_integer(), false))
@@ -362,8 +370,16 @@ pub fn layout_of_ty_query(
             return Err(LayoutError::NotImplemented)
         }
         TyKind::Error => return Err(LayoutError::HasErrorType),
-        TyKind::AssociatedType(_, _)
-        | TyKind::Alias(_)
+        TyKind::AssociatedType(id, subst) => {
+            // Try again with `TyKind::Alias` to normalize the associated type.
+            let ty = TyKind::Alias(chalk_ir::AliasTy::Projection(ProjectionTy {
+                associated_ty_id: *id,
+                substitution: subst.clone(),
+            }))
+            .intern(Interner);
+            return db.layout_of_ty(ty, trait_env);
+        }
+        TyKind::Alias(_)
         | TyKind::Placeholder(_)
         | TyKind::BoundVar(_)
         | TyKind::InferenceVar(_, _) => return Err(LayoutError::HasPlaceholder),
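Together with the pointer-metadata change above, these two hunks let the layout query handle associated types by normalizing them through a projection alias and retrying. A rough illustration of the shape this enables (not part of the diff; the `?Sized` bound is added here so the snippet compiles with plain rustc, whereas the closure_capture_unsized_type test added earlier relies on minicore):

    trait A {
        type Ty: ?Sized;
    }

    impl A for i32 {
        type Ty = [u8];
    }

    // `&<T as A>::Ty` is a wide pointer once `<i32 as A>::Ty` normalizes to
    // `[u8]`, so computing its layout requires resolving the associated type
    // first.
    fn f<T: A>(x: &T::Ty) -> &T::Ty {
        let c = || &*x;
        c()
    }

    fn main() {
        let k: &[u8] = &[1, 2, 3];
        let k = f::<i32>(k);
        assert_eq!(k[0] + k[1] + k[2], 6);
    }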
@@ -313,6 +313,7 @@ pub enum MirEvalError {
     InvalidVTableId(usize),
     CoerceUnsizedError(Ty),
     LangItemNotFound(LangItem),
+    BrokenLayout(Layout),
 }

 impl MirEvalError {
@@ -399,6 +400,7 @@ impl MirEvalError {
             | MirEvalError::TargetDataLayoutNotAvailable
             | MirEvalError::CoerceUnsizedError(_)
             | MirEvalError::LangItemNotFound(_)
+            | MirEvalError::BrokenLayout(_)
             | MirEvalError::InvalidVTableId(_) => writeln!(f, "{:?}", err)?,
         }
         Ok(())
@@ -433,6 +435,7 @@ impl std::fmt::Debug for MirEvalError {
             Self::CoerceUnsizedError(arg0) => {
                 f.debug_tuple("CoerceUnsizedError").field(arg0).finish()
             }
+            Self::BrokenLayout(arg0) => f.debug_tuple("BrokenLayout").field(arg0).finish(),
             Self::InvalidVTableId(arg0) => f.debug_tuple("InvalidVTableId").field(arg0).finish(),
             Self::NotSupported(arg0) => f.debug_tuple("NotSupported").field(arg0).finish(),
             Self::InvalidConst(arg0) => {
@@ -702,9 +705,7 @@ impl Evaluator<'_> {
     }

     fn layout_adt(&self, adt: AdtId, subst: Substitution) -> Result<Arc<Layout>> {
-        self.db.layout_of_adt(adt, subst.clone(), self.trait_env.clone()).map_err(|e| {
-            MirEvalError::LayoutError(e, TyKind::Adt(chalk_ir::AdtId(adt), subst).intern(Interner))
-        })
+        self.layout(&TyKind::Adt(chalk_ir::AdtId(adt), subst).intern(Interner))
     }

     fn place_ty<'a>(&'a self, p: &Place, locals: &'a Locals) -> Result<Ty> {
@@ -1543,12 +1544,18 @@ impl Evaluator<'_> {
     ) -> Result<Vec<u8>> {
         let mut result = vec![0; size];
         if let Some((offset, size, value)) = tag {
-            result[offset..offset + size].copy_from_slice(&value.to_le_bytes()[0..size]);
+            match result.get_mut(offset..offset + size) {
+                Some(it) => it.copy_from_slice(&value.to_le_bytes()[0..size]),
+                None => return Err(MirEvalError::BrokenLayout(variant_layout.clone())),
+            }
         }
         for (i, op) in values.enumerate() {
             let offset = variant_layout.fields.offset(i).bytes_usize();
             let op = op.get(&self)?;
-            result[offset..offset + op.len()].copy_from_slice(op);
+            match result.get_mut(offset..offset + op.len()) {
+                Some(it) => it.copy_from_slice(op),
+                None => return Err(MirEvalError::BrokenLayout(variant_layout.clone())),
+            }
         }
         Ok(result)
     }
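The two writes above switch from panicking slice indexing to `get_mut`, so an out-of-range field write surfaces as the new BrokenLayout error instead of aborting the evaluator. A standalone sketch of the same pattern (hypothetical helper, not part of the diff):

    // `get_mut` over a range returns None instead of panicking when the range
    // is out of bounds, letting the caller report a recoverable error.
    fn write_at(buf: &mut [u8], offset: usize, bytes: &[u8]) -> Result<(), String> {
        match buf.get_mut(offset..offset + bytes.len()) {
            Some(dst) => {
                dst.copy_from_slice(bytes);
                Ok(())
            }
            None => Err("broken layout: value does not fit its buffer".into()),
        }
    }

    fn main() {
        let mut buf = vec![0u8; 4];
        assert!(write_at(&mut buf, 0, &[1, 2]).is_ok());
        assert!(write_at(&mut buf, 3, &[1, 2]).is_err()); // buf[3..5] would have panicked
    }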
@@ -124,9 +124,85 @@ impl Evaluator<'_> {
             destination.write_from_bytes(self, &result)?;
             return Ok(true);
         }
+        if let ItemContainerId::TraitId(t) = def.lookup(self.db.upcast()).container {
+            if self.db.lang_attr(t.into()) == Some(LangItem::Clone) {
+                let [self_ty] = generic_args.as_slice(Interner) else {
+                    not_supported!("wrong generic arg count for clone");
+                };
+                let Some(self_ty) = self_ty.ty(Interner) else {
+                    not_supported!("wrong generic arg kind for clone");
+                };
+                // Clone has special impls for tuples and function pointers
+                if matches!(self_ty.kind(Interner), TyKind::Function(_) | TyKind::Tuple(..)) {
+                    self.exec_clone(def, args, self_ty.clone(), locals, destination, span)?;
+                    return Ok(true);
+                }
+            }
+        }
         Ok(false)
     }

+    /// Clone has special impls for tuples and function pointers
+    fn exec_clone(
+        &mut self,
+        def: FunctionId,
+        args: &[IntervalAndTy],
+        self_ty: Ty,
+        locals: &Locals,
+        destination: Interval,
+        span: MirSpan,
+    ) -> Result<()> {
+        match self_ty.kind(Interner) {
+            TyKind::Function(_) => {
+                let [arg] = args else {
+                    not_supported!("wrong arg count for clone");
+                };
+                let addr = Address::from_bytes(arg.get(self)?)?;
+                return destination
+                    .write_from_interval(self, Interval { addr, size: destination.size });
+            }
+            TyKind::Tuple(_, subst) => {
+                let [arg] = args else {
+                    not_supported!("wrong arg count for clone");
+                };
+                let addr = Address::from_bytes(arg.get(self)?)?;
+                let layout = self.layout(&self_ty)?;
+                for (i, ty) in subst.iter(Interner).enumerate() {
+                    let ty = ty.assert_ty_ref(Interner);
+                    let size = self.layout(ty)?.size.bytes_usize();
+                    let tmp = self.heap_allocate(self.ptr_size(), self.ptr_size())?;
+                    let arg = IntervalAndTy {
+                        interval: Interval { addr: tmp, size: self.ptr_size() },
+                        ty: TyKind::Ref(Mutability::Not, static_lifetime(), ty.clone())
+                            .intern(Interner),
+                    };
+                    let offset = layout.fields.offset(i).bytes_usize();
+                    self.write_memory(tmp, &addr.offset(offset).to_bytes())?;
+                    self.exec_clone(
+                        def,
+                        &[arg],
+                        ty.clone(),
+                        locals,
+                        destination.slice(offset..offset + size),
+                        span,
+                    )?;
+                }
+            }
+            _ => {
+                self.exec_fn_with_args(
+                    def,
+                    args,
+                    Substitution::from1(Interner, self_ty),
+                    locals,
+                    destination,
+                    None,
+                    span,
+                )?;
+            }
+        }
+        Ok(())
+    }
+
     fn exec_alloc_fn(
         &mut self,
         alloc_fn: &str,
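For orientation, a minimal sketch (not part of the diff) of the two kinds of `Clone::clone` calls the new exec_clone shim intercepts: function pointers are copied bitwise, and tuples are cloned field by field at their layout offsets, recursing into nested tuples:

    fn main() {
        // TyKind::Function branch: cloning a fn pointer copies the pointer itself.
        let f: fn(i32) -> i32 = |x| x + 1;
        let g = f.clone();

        // TyKind::Tuple branch: each field is cloned at its own offset, recursively.
        let t = ((1i32, 2u8), 3i64);
        let u = t.clone();

        assert_eq!(g(1), 2);
        assert_eq!(u.0 .1, 2);
    }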
@@ -1057,7 +1133,14 @@ impl Evaluator<'_> {
         _span: MirSpan,
     ) -> Result<()> {
         // We are a single threaded runtime with no UB checking and no optimization, so
-        // we can implement these as normal functions.
+        // we can implement atomic intrinsics as normal functions.
+
+        if name.starts_with("singlethreadfence_") || name.starts_with("fence_") {
+            return Ok(());
+        }
+
+        // The rest of atomic intrinsics have exactly one generic arg
+
         let Some(ty) = generic_args.as_slice(Interner).get(0).and_then(|it| it.ty(Interner)) else {
             return Err(MirEvalError::TypeError("atomic intrinsic generic arg is not provided"));
         };
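The early return above makes the fence intrinsics no-ops, which is sound for a single-threaded evaluator. For orientation (not part of the diff), the stable wrappers that lower to the intrinsic names exercised by the atomic() test earlier in this diff:

    use std::sync::atomic::{compiler_fence, fence, AtomicI32, Ordering};

    fn main() {
        let x = AtomicI32::new(5);
        x.fetch_xor(1024, Ordering::SeqCst); // atomic_xor_seqcst
        fence(Ordering::SeqCst);             // atomic_fence_seqcst: treated as a no-op
        compiler_fence(Ordering::AcqRel);    // atomic_singlethreadfence_acqrel: also a no-op
        assert_eq!(x.load(Ordering::SeqCst), 5 ^ 1024);
    }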
@@ -660,6 +660,11 @@ impl<'ctx> MirLowerCtx<'ctx> {
                     expr_id.into(),
                 )
             }
+            TyKind::Closure(_, _) => {
+                not_supported!(
+                    "method resolution not emitted for closure (Are Fn traits available?)"
+                );
+            }
             TyKind::Error => {
                 return Err(MirLowerError::MissingFunctionDefinition(self.owner, expr_id))
             }