use crate::data_value::DataValue;
use crate::entity::SecondaryMap;
use crate::fx::{FxHashMap, FxHashSet};
use crate::inst_predicates::{has_lowering_side_effect, is_constant_64bit};
use crate::ir::instructions::BranchInfo;
use crate::ir::{
ArgumentPurpose, Block, Constant, ConstantData, ExternalName, Function, GlobalValueData, Inst,
InstructionData, MemFlags, Opcode, Signature, SourceLoc, Type, Value, ValueDef,
};
use crate::machinst::{
writable_value_regs, ABICallee, BlockIndex, BlockLoweringOrder, LoweredBlock, MachLabel, VCode,
VCodeBuilder, VCodeConstant, VCodeConstantData, VCodeConstants, VCodeInst, ValueRegs,
};
use crate::CodegenResult;
use alloc::boxed::Box;
use alloc::vec::Vec;
use core::convert::TryInto;
use log::debug;
use regalloc::{Reg, StackmapRequestInfo, Writable};
use smallvec::SmallVec;
use std::fmt::Debug;
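/// An "instruction color" partitions CLIF instructions by side-effecting ops.
/// All instructions with the same color are guaranteed not to be separated by
/// any side-effecting op, so a pure computation may be moved (merged) across
/// same-colored code without changing behavior. A side-effecting instruction
/// sees one color at entry and that color plus one at exit.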
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
struct InstColor(u32);
impl InstColor {
fn new(n: u32) -> InstColor {
InstColor(n)
}
pub fn get(self) -> u32 {
self.0
}
}
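/// A context that machine-specific lowering code can use to emit lowered
/// instructions. This is the view of the machine-independent per-function
/// lowering context (`Lower`) that is exposed to the machine backend.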
pub trait LowerCtx {
type I: VCodeInst;
fn abi(&mut self) -> &mut dyn ABICallee<I = Self::I>;
fn retval(&self, idx: usize) -> ValueRegs<Writable<Reg>>;
fn get_vm_context(&self) -> Option<Reg>;
fn data(&self, ir_inst: Inst) -> &InstructionData;
fn ty(&self, ir_inst: Inst) -> Type;
fn call_target<'b>(&'b self, ir_inst: Inst) -> Option<(&'b ExternalName, RelocDistance)>;
fn call_sig<'b>(&'b self, ir_inst: Inst) -> Option<&'b Signature>;
fn symbol_value<'b>(&'b self, ir_inst: Inst) -> Option<(&'b ExternalName, RelocDistance, i64)>;
fn memflags(&self, ir_inst: Inst) -> Option<MemFlags>;
fn srcloc(&self, ir_inst: Inst) -> SourceLoc;
fn num_inputs(&self, ir_inst: Inst) -> usize;
fn num_outputs(&self, ir_inst: Inst) -> usize;
fn input_ty(&self, ir_inst: Inst, idx: usize) -> Type;
fn output_ty(&self, ir_inst: Inst, idx: usize) -> Type;
fn get_constant(&self, ir_inst: Inst) -> Option<u64>;
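    /// Get the `idx`th input of `ir_inst` in non-register form, if available:
    /// the producing instruction (which may then be merged into the current
    /// instruction's lowering) and/or a known constant value. If neither form
    /// is available, use `put_input_in_regs()` to force the value into
    /// registers.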
fn get_input_as_source_or_const(&self, ir_inst: Inst, idx: usize) -> NonRegInput;
fn put_input_in_regs(&mut self, ir_inst: Inst, idx: usize) -> ValueRegs<Reg>;
fn get_output(&self, ir_inst: Inst, idx: usize) -> ValueRegs<Writable<Reg>>;
fn alloc_tmp(&mut self, ty: Type) -> ValueRegs<Writable<Reg>>;
fn emit(&mut self, mach_inst: Self::I);
fn emit_safepoint(&mut self, mach_inst: Self::I);
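    /// Indicate that the side effect of `ir_inst` has been sunk into the
    /// instruction currently being lowered. The sunk instruction will not be
    /// codegen'd at its original location; its result must have no remaining
    /// uses.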
fn sink_inst(&mut self, ir_inst: Inst);
fn get_constant_data(&self, constant_handle: Constant) -> &ConstantData;
fn use_constant(&mut self, constant: VCodeConstantData) -> VCodeConstant;
fn get_immediate(&self, ir_inst: Inst) -> Option<DataValue>;
fn ensure_in_vreg(&mut self, reg: Reg, ty: Type) -> Reg;
}
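/// The ways in which an instruction input may be available other than in a
/// register: as a mergeable producing instruction and/or as a known constant.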
#[derive(Clone, Copy, Debug)]
pub struct NonRegInput {
    /// The producing instruction and its output index, if that computation may
    /// be merged into the current instruction's lowering (a merged
    /// side-effecting producer must be `sink_inst()`'d).
    pub inst: Option<(Inst, usize)>,
    /// The value of this input, if it is a known constant.
    pub constant: Option<u64>,
}
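/// A machine backend: the target-specific half of lowering. The
/// machine-independent driver (`Lower`) walks the CLIF and calls back into
/// the backend to translate each IR instruction into machine instructions.
///
/// A minimal implementation might look like the following sketch, where
/// `MyBackend`, `MyInst`, and the add-emission helper are hypothetical:
///
/// ```ignore
/// impl LowerBackend for MyBackend {
///     type MInst = MyInst;
///
///     fn lower<C: LowerCtx<I = MyInst>>(&self, ctx: &mut C, inst: Inst) -> CodegenResult<()> {
///         match ctx.data(inst).opcode() {
///             Opcode::Iadd => {
///                 // Force both inputs into registers and write the sum into
///                 // the output vreg that the driver allocated for us.
///                 let lhs = ctx.put_input_in_regs(inst, 0).only_reg().unwrap();
///                 let rhs = ctx.put_input_in_regs(inst, 1).only_reg().unwrap();
///                 let dst = ctx.get_output(inst, 0).only_reg().unwrap();
///                 ctx.emit(MyInst::add(dst, lhs, rhs));
///                 Ok(())
///             }
///             _ => unimplemented!(),
///         }
///     }
///
///     // `lower_branch_group` translates a block's terminator group
///     // (e.g. `brz` + `jump`) into a single N-way machine branch.
/// }
/// ```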
pub trait LowerBackend {
type MInst: VCodeInst;
fn lower<C: LowerCtx<I = Self::MInst>>(&self, ctx: &mut C, inst: Inst) -> CodegenResult<()>;
fn lower_branch_group<C: LowerCtx<I = Self::MInst>>(
&self,
ctx: &mut C,
insts: &[Inst],
targets: &[MachLabel],
) -> CodegenResult<()>;
fn maybe_pinned_reg(&self) -> Option<Reg> {
None
}
}
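/// A lowered machine instruction buffered before insertion into the `VCode`,
/// together with its source location and whether it is a stack-map safepoint.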
struct InstTuple<I: VCodeInst> {
loc: SourceLoc,
is_safepoint: bool,
inst: I,
}
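/// Machine-independent lowering driver / machine-instruction container.
/// Maintains the correspondence from original CLIF instructions to the
/// lowered machine instructions.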
pub struct Lower<'func, I: VCodeInst> {
    /// The function to lower.
    f: &'func Function,
    /// Lowered machine instructions.
    vcode: VCodeBuilder<I>,
    /// Mapping from CLIF `Value` to virtual register(s).
    value_regs: SecondaryMap<Value, ValueRegs<Reg>>,
    /// Return-value vregs.
    retval_regs: Vec<ValueRegs<Reg>>,
    /// Instruction colors at block exits.
    block_end_colors: SecondaryMap<Block, InstColor>,
    /// Entry colors of side-effecting instructions (exit color = entry + 1).
    side_effect_inst_entry_colors: FxHashMap<Inst, InstColor>,
    /// Color at entry to the instruction currently being lowered.
    cur_scan_entry_color: Option<InstColor>,
    /// The CLIF instruction currently being lowered.
    cur_inst: Option<Inst>,
    /// Known 64-bit constant values of instructions.
    inst_constants: FxHashMap<Inst, u64>,
    /// Use counts per SSA value, as counted in the input IR.
    value_uses: SecondaryMap<Value, u32>,
    /// Uses of each SSA value seen so far during lowering.
    value_lowered_uses: SecondaryMap<Value, u32>,
    /// Side-effecting instructions that have been sunk; not codegen'd in place.
    inst_sunk: FxHashSet<Inst>,
    /// Next virtual register number to allocate.
    next_vreg: u32,
    /// Lowered machine instructions, in reverse block order.
    block_insts: Vec<InstTuple<I>>,
    /// Ranges in `block_insts` corresponding to each basic block.
    block_ranges: Vec<(usize, usize)>,
    /// Instructions collected for the BB in progress, in reverse order.
    bb_insts: Vec<InstTuple<I>>,
    /// Instructions collected for the CLIF inst in progress, in forward order.
    ir_insts: Vec<InstTuple<I>>,
    /// The physical register backing `get_pinned_reg`, if any.
    pinned_reg: Option<Reg>,
    /// The vreg holding the special `VMContext` parameter, if present.
    vm_context: Option<Reg>,
}
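/// An estimate of how far a relocation target will be from its reference:
/// "near" targets may be reachable with a limited-range, PC-relative form,
/// while "far" targets may be anywhere in the address space.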
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum RelocDistance {
    /// The target is known to be nearby, e.g. within the same compiled
    /// module. When unsure, use `Far`.
    Near,
    /// The target could be anywhere in the address space.
    Far,
}
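/// Allocate fresh virtual registers sufficient to hold one value of type `ty`
/// (one, two, or four registers, per the target's register-class mapping),
/// and record each vreg's type with the `VCodeBuilder`.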
fn alloc_vregs<I: VCodeInst>(
ty: Type,
next_vreg: &mut u32,
vcode: &mut VCodeBuilder<I>,
) -> CodegenResult<ValueRegs<Reg>> {
let v = *next_vreg;
let (regclasses, tys) = I::rc_for_type(ty)?;
*next_vreg += regclasses.len() as u32;
let regs = match regclasses {
&[rc0] => ValueRegs::one(Reg::new_virtual(rc0, v)),
&[rc0, rc1] => ValueRegs::two(Reg::new_virtual(rc0, v), Reg::new_virtual(rc1, v + 1)),
#[cfg(feature = "arm32")]
&[rc0, rc1, rc2, rc3] => ValueRegs::four(
Reg::new_virtual(rc0, v),
Reg::new_virtual(rc1, v + 1),
Reg::new_virtual(rc2, v + 2),
Reg::new_virtual(rc3, v + 3),
),
_ => panic!("Value must reside in 1, 2 or 4 registers"),
};
    for (&reg_ty, &reg) in tys.iter().zip(regs.regs().iter()) {
vcode.set_vreg_type(reg.to_virtual_reg(), reg_ty);
}
Ok(regs)
}
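/// Whether a return sequence should end in an actual return instruction
/// (for `return`) or in a placeholder that the ABI epilogue replaces (for
/// `fallthrough_return`).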
enum GenerateReturn {
Yes,
No,
}
impl<'func, I: VCodeInst> Lower<'func, I> {
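    /// Prepare a new lowering context for the given IR function: allocate
    /// vregs for all block params and instruction results, locate the
    /// `VMContext` parameter, and precompute instruction colors, known
    /// constants, and value use counts.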
pub fn new(
f: &'func Function,
abi: Box<dyn ABICallee<I = I>>,
emit_info: I::Info,
block_order: BlockLoweringOrder,
) -> CodegenResult<Lower<'func, I>> {
let constants = VCodeConstants::with_capacity(f.dfg.constants.len());
let mut vcode = VCodeBuilder::new(abi, emit_info, block_order, constants);
let mut next_vreg: u32 = 0;
let mut value_regs = SecondaryMap::with_default(ValueRegs::invalid());
for bb in f.layout.blocks() {
            for &param in f.dfg.block_params(bb) {
let ty = f.dfg.value_type(param);
if value_regs[param].is_invalid() {
let regs = alloc_vregs(ty, &mut next_vreg, &mut vcode)?;
value_regs[param] = regs;
debug!("bb {} param {}: regs {:?}", bb, param, regs);
}
}
for inst in f.layout.block_insts(bb) {
for &result in f.dfg.inst_results(inst) {
let ty = f.dfg.value_type(result);
if value_regs[result].is_invalid() {
let regs = alloc_vregs(ty, &mut next_vreg, &mut vcode)?;
value_regs[result] = regs;
debug!(
"bb {} inst {} ({:?}): result regs {:?}",
bb, inst, f.dfg[inst], regs,
);
}
}
}
}
let vm_context = f
.signature
.special_param_index(ArgumentPurpose::VMContext)
.map(|vm_context_index| {
let entry_block = f.layout.entry_block().unwrap();
let param = f.dfg.block_params(entry_block)[vm_context_index];
value_regs[param].only_reg().unwrap()
});
let mut retval_regs = vec![];
for ret in &f.signature.returns {
let regs = alloc_vregs(ret.value_type, &mut next_vreg, &mut vcode)?;
retval_regs.push(regs);
debug!("retval gets regs {:?}", regs);
}
let mut cur_color = 0;
let mut block_end_colors = SecondaryMap::with_default(InstColor::new(0));
let mut side_effect_inst_entry_colors = FxHashMap::default();
let mut inst_constants = FxHashMap::default();
let mut value_uses = SecondaryMap::with_default(0);
for bb in f.layout.blocks() {
cur_color += 1;
for inst in f.layout.block_insts(bb) {
let side_effect = has_lowering_side_effect(f, inst);
debug!("bb {} inst {} has color {}", bb, inst, cur_color);
if side_effect {
side_effect_inst_entry_colors.insert(inst, InstColor::new(cur_color));
debug!(" -> side-effecting; incrementing color for next inst");
cur_color += 1;
}
if let Some(c) = is_constant_64bit(f, inst) {
debug!(" -> constant: {}", c);
inst_constants.insert(inst, c);
}
for arg in f.dfg.inst_args(inst) {
let arg = f.dfg.resolve_aliases(*arg);
value_uses[arg] += 1;
}
}
block_end_colors[bb] = InstColor::new(cur_color);
}
Ok(Lower {
f,
vcode,
value_regs,
retval_regs,
block_end_colors,
side_effect_inst_entry_colors,
inst_constants,
next_vreg,
value_uses,
value_lowered_uses: SecondaryMap::default(),
inst_sunk: FxHashSet::default(),
cur_scan_entry_color: None,
cur_inst: None,
block_insts: vec![],
block_ranges: vec![],
bb_insts: vec![],
ir_insts: vec![],
pinned_reg: None,
vm_context,
})
}
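    /// Emit instructions that copy the ABI-passed arguments into the vregs of
    /// the entry block's parameters, and set up the return-value area if the
    /// ABI requires one.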
fn gen_arg_setup(&mut self) {
if let Some(entry_bb) = self.f.layout.entry_block() {
debug!(
"gen_arg_setup: entry BB {} args are:\n{:?}",
entry_bb,
self.f.dfg.block_params(entry_bb)
);
for (i, param) in self.f.dfg.block_params(entry_bb).iter().enumerate() {
if !self.vcode.abi().arg_is_needed_in_body(i) {
continue;
}
let regs = writable_value_regs(self.value_regs[*param]);
for insn in self.vcode.abi().gen_copy_arg_to_regs(i, regs).into_iter() {
self.emit(insn);
}
}
if let Some(insn) = self.vcode.abi().gen_retval_area_setup() {
self.emit(insn);
}
}
}
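    /// Copy the return-value vregs into their ABI-assigned locations, then
    /// emit either a real return instruction or an epilogue placeholder.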
fn gen_retval_setup(&mut self, gen_ret_inst: GenerateReturn) {
let retval_regs = self.retval_regs.clone();
for (i, regs) in retval_regs.into_iter().enumerate() {
let regs = writable_value_regs(regs);
for insn in self
.vcode
.abi()
.gen_copy_regs_to_retval(i, regs)
.into_iter()
{
self.emit(insn);
}
}
let inst = match gen_ret_inst {
GenerateReturn::Yes => self.vcode.abi().gen_ret(),
GenerateReturn::No => self.vcode.abi().gen_epilogue_placeholder(),
};
self.emit(inst);
}
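    /// Lower the block-parameter assignments implied by the CFG edge from
    /// `pred` to `succ` via branch `inst`. The assignments are semantically a
    /// parallel move: if any destination register is also a source, the moves
    /// are staged through fresh temporaries.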
fn lower_edge(&mut self, pred: Block, inst: Inst, succ: Block) -> CodegenResult<()> {
debug!("lower_edge: pred {} succ {}", pred, succ);
let num_args = self.f.dfg.block_params(succ).len();
debug_assert!(num_args == self.f.dfg.inst_variable_args(inst).len());
if num_args == 0 {
return Ok(());
}
self.cur_inst = Some(inst);
let mut const_bundles: SmallVec<[_; 16]> = SmallVec::new();
let mut var_bundles: SmallVec<[_; 16]> = SmallVec::new();
        for (i, (dst_val, src_val)) in self
            .f
            .dfg
            .block_params(succ)
            .iter()
            .zip(self.f.dfg.inst_variable_args(inst).iter())
            .enumerate()
        {
            let src_val = self.f.dfg.resolve_aliases(*src_val);
            let ty = self.f.dfg.value_type(src_val);
            debug_assert!(ty == self.f.dfg.value_type(*dst_val));
            let dst_regs = self.value_regs[*dst_val];
            let input = self.get_value_as_source_or_const(src_val);
            debug!("jump arg {} is {}", i, src_val);
if let Some(c) = input.constant {
debug!(" -> constant {}", c);
const_bundles.push((ty, writable_value_regs(dst_regs), c));
} else {
let src_regs = self.put_value_in_regs(src_val);
debug!(" -> reg {:?}", src_regs);
if dst_regs != src_regs {
var_bundles.push((ty, writable_value_regs(dst_regs), src_regs));
}
}
}
let mut src_reg_set = FxHashSet::<Reg>::default();
for (_, _, src_regs) in &var_bundles {
            for &reg in src_regs.regs() {
src_reg_set.insert(reg);
}
}
let mut overlaps = false;
'outer: for (_, dst_regs, _) in &var_bundles {
            for &reg in dst_regs.regs() {
                if src_reg_set.contains(&reg.to_reg()) {
overlaps = true;
break 'outer;
}
}
}
if !overlaps {
for (ty, dst_regs, src_regs) in &var_bundles {
let (_, reg_tys) = I::rc_for_type(*ty)?;
for ((dst, src), reg_ty) in dst_regs
.regs()
.iter()
.zip(src_regs.regs().iter())
.zip(reg_tys.iter())
{
self.emit(I::gen_move(*dst, *src, *reg_ty));
}
}
} else {
let mut tmp_regs = SmallVec::<[ValueRegs<Writable<Reg>>; 16]>::new();
for (ty, _, _) in &var_bundles {
tmp_regs.push(self.alloc_tmp(*ty));
}
for ((ty, _, src_reg), tmp_reg) in var_bundles.iter().zip(tmp_regs.iter()) {
let (_, reg_tys) = I::rc_for_type(*ty)?;
for ((tmp, src), reg_ty) in tmp_reg
.regs()
.iter()
.zip(src_reg.regs().iter())
.zip(reg_tys.iter())
{
self.emit(I::gen_move(*tmp, *src, *reg_ty));
}
}
for ((ty, dst_reg, _), tmp_reg) in var_bundles.iter().zip(tmp_regs.iter()) {
let (_, reg_tys) = I::rc_for_type(*ty)?;
for ((dst, tmp), reg_ty) in dst_reg
.regs()
.iter()
.zip(tmp_reg.regs().iter())
.zip(reg_tys.iter())
{
self.emit(I::gen_move(*dst, tmp.to_reg(), *reg_ty));
}
}
}
for (ty, dst_reg, const_val) in &const_bundles {
for inst in I::gen_constant(*dst_reg, *const_val as u128, *ty, |ty| {
self.alloc_tmp(ty).only_reg().unwrap()
})
.into_iter()
{
self.emit(inst);
}
}
Ok(())
}
fn is_inst_sunk(&self, inst: Inst) -> bool {
self.inst_sunk.contains(&inst)
}
fn is_any_inst_result_needed(&self, inst: Inst) -> bool {
self.f
.dfg
.inst_results(inst)
.iter()
.any(|&result| self.value_lowered_uses[result] > 0)
}
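    /// Lower all instructions of one CLIF block except its branches, scanning
    /// in reverse order so that every use is seen before its def, and skipping
    /// pure instructions whose results are unused.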
fn lower_clif_block<B: LowerBackend<MInst = I>>(
&mut self,
backend: &B,
block: Block,
) -> CodegenResult<()> {
self.cur_scan_entry_color = Some(self.block_end_colors[block]);
for inst in self.f.layout.block_insts(block).rev() {
let data = &self.f.dfg[inst];
let has_side_effect = has_lowering_side_effect(self.f, inst);
if self.is_inst_sunk(inst) {
continue;
}
let value_needed = self.is_any_inst_result_needed(inst);
debug!(
"lower_clif_block: block {} inst {} ({:?}) is_branch {} side_effect {} value_needed {}",
block,
inst,
data,
data.opcode().is_branch(),
has_side_effect,
value_needed,
);
if self.f.dfg[inst].opcode().is_branch() {
continue;
}
self.cur_inst = Some(inst);
if has_side_effect {
let entry_color = *self
.side_effect_inst_entry_colors
.get(&inst)
.expect("every side-effecting inst should have a color-map entry");
self.cur_scan_entry_color = Some(entry_color);
}
if has_side_effect || value_needed {
debug!("lowering: inst {}: {:?}", inst, self.f.dfg[inst]);
backend.lower(self, inst)?;
}
if data.opcode().is_return() {
let gen_ret = if data.opcode() == Opcode::Return {
GenerateReturn::Yes
} else {
debug_assert!(data.opcode() == Opcode::FallthroughReturn);
GenerateReturn::No
};
self.gen_retval_setup(gen_ret);
}
let loc = self.srcloc(inst);
self.finish_ir_inst(loc);
}
self.cur_scan_entry_color = None;
Ok(())
}
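    /// Move the machine instructions emitted for the current IR instruction
    /// into the in-progress BB buffer, tagging them with `loc`. The buffer is
    /// filled in reverse, matching the backward block scan.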
fn finish_ir_inst(&mut self, loc: SourceLoc) {
for mut tuple in self.ir_insts.drain(..).rev() {
tuple.loc = loc;
self.bb_insts.push(tuple);
}
}
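    /// Flush the in-progress BB's instructions into `block_insts`, restoring
    /// forward order within the block, and record the block's range.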
fn finish_bb(&mut self) {
let start = self.block_insts.len();
for tuple in self.bb_insts.drain(..).rev() {
self.block_insts.push(tuple);
}
let end = self.block_insts.len();
self.block_ranges.push((start, end));
}
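    /// Copy the buffered blocks into the `VCode`, iterating the recorded
    /// ranges in reverse so that blocks appear in final (forward) order.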
fn copy_bbs_to_vcode(&mut self) {
for &(start, end) in self.block_ranges.iter().rev() {
for &InstTuple {
loc,
is_safepoint,
ref inst,
} in &self.block_insts[start..end]
{
self.vcode.set_srcloc(loc);
self.vcode.push(inst.clone(), is_safepoint);
}
self.vcode.end_bb();
}
}
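    /// Lower a block's terminating group of branches, which together form a
    /// single N-way transfer of control to the given targets.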
fn lower_clif_branches<B: LowerBackend<MInst = I>>(
&mut self,
backend: &B,
block: Block,
branches: &SmallVec<[Inst; 2]>,
targets: &SmallVec<[MachLabel; 2]>,
) -> CodegenResult<()> {
debug!(
"lower_clif_branches: block {} branches {:?} targets {:?}",
block, branches, targets,
);
self.cur_inst = Some(branches[0]);
backend.lower_branch_group(self, branches, targets)?;
let loc = self.srcloc(branches[0]);
self.finish_ir_inst(loc);
Ok(())
}
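    /// Collect the branch instructions that terminate the given lowered
    /// block, with one `MachLabel` per successor edge. A `br_table` appears
    /// once but contributes multiple targets.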
fn collect_branches_and_targets(
&self,
bindex: BlockIndex,
_bb: Block,
branches: &mut SmallVec<[Inst; 2]>,
targets: &mut SmallVec<[MachLabel; 2]>,
) {
branches.clear();
targets.clear();
let mut last_inst = None;
for &(inst, succ) in self.vcode.block_order().succ_indices(bindex) {
if last_inst != Some(inst) {
branches.push(inst);
} else {
debug_assert!(self.f.dfg[inst].opcode() == Opcode::BrTable);
debug_assert!(branches.len() == 1);
}
last_inst = Some(inst);
targets.push(MachLabel::from_block(succ));
}
}
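    /// Lower the function: process every block in the lowered block order
    /// (scanning in reverse), then build and return the final `VCode` along
    /// with the stack-map (safepoint) request information.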
pub fn lower<B: LowerBackend<MInst = I>>(
mut self,
backend: &B,
) -> CodegenResult<(VCode<I>, StackmapRequestInfo)> {
debug!("about to lower function: {:?}", self.f);
let maybe_tmp = if let Some(temp_ty) = self.vcode.abi().temp_needed() {
Some(self.alloc_tmp(temp_ty).only_reg().unwrap())
} else {
None
};
self.vcode.abi().init(maybe_tmp);
self.pinned_reg = backend.maybe_pinned_reg();
self.vcode.set_entry(0);
let mut branches: SmallVec<[Inst; 2]> = SmallVec::new();
let mut targets: SmallVec<[MachLabel; 2]> = SmallVec::new();
let lowered_order: SmallVec<[LoweredBlock; 64]> = self
.vcode
.block_order()
.lowered_order()
.iter()
.cloned()
.collect();
for (bindex, lb) in lowered_order.iter().enumerate().rev() {
let bindex = bindex as BlockIndex;
if let Some(bb) = lb.orig_block() {
self.collect_branches_and_targets(bindex, bb, &mut branches, &mut targets);
if branches.len() > 0 {
self.lower_clif_branches(backend, bb, &branches, &targets)?;
self.finish_ir_inst(self.srcloc(branches[0]));
}
} else {
let (_, succ) = self.vcode.block_order().succ_indices(bindex)[0];
self.emit(I::gen_jump(MachLabel::from_block(succ)));
self.finish_ir_inst(SourceLoc::default());
}
if let Some((pred, inst, succ)) = lb.out_edge() {
self.lower_edge(pred, inst, succ)?;
self.finish_ir_inst(SourceLoc::default());
}
if let Some(bb) = lb.orig_block() {
self.lower_clif_block(backend, bb)?;
}
if let Some((pred, inst, succ)) = lb.in_edge() {
self.lower_edge(pred, inst, succ)?;
self.finish_ir_inst(SourceLoc::default());
}
if bindex == 0 {
self.gen_arg_setup();
self.finish_ir_inst(SourceLoc::default());
}
self.finish_bb();
}
self.copy_bbs_to_vcode();
let (vcode, stack_map_info) = self.vcode.build();
debug!("built vcode: {:?}", vcode);
Ok((vcode, stack_map_info))
}
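    /// Return the vreg(s) holding `val`, recording the use. A value defined
    /// by `get_pinned_reg` maps directly to the backend's pinned register,
    /// when one is provided.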
fn put_value_in_regs(&mut self, val: Value) -> ValueRegs<Reg> {
debug!("put_value_in_reg: val {}", val);
let mut regs = self.value_regs[val];
debug!(" -> regs {:?}", regs);
assert!(regs.is_valid());
self.value_lowered_uses[val] += 1;
if let ValueDef::Result(i, 0) = self.f.dfg.value_def(val) {
if self.f.dfg[i].opcode() == Opcode::GetPinnedReg {
if let Some(pr) = self.pinned_reg {
regs = ValueRegs::one(pr);
}
}
}
regs
}
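    /// Determine how `val` is available other than in a register: as a
    /// mergeable producing instruction and/or a known constant. A
    /// side-effecting producer is mergeable only if it has exactly one use,
    /// has not already been lowered, has a single output, and is
    /// color-adjacent to the current scan point (i.e., no other side effect
    /// intervenes).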
fn get_value_as_source_or_const(&self, val: Value) -> NonRegInput {
debug!(
"get_input_for_val: val {} at cur_inst {:?} cur_scan_entry_color {:?}",
val, self.cur_inst, self.cur_scan_entry_color,
);
let inst = match self.f.dfg.value_def(val) {
ValueDef::Result(src_inst, result_idx) => {
let src_side_effect = has_lowering_side_effect(self.f, src_inst);
debug!(" -> src inst {}", src_inst);
debug!(" -> has lowering side effect: {}", src_side_effect);
                if !src_side_effect {
                    // Pure instruction: freely mergeable into its consumer.
                    Some((src_inst, result_idx))
                } else if self.cur_scan_entry_color.is_some()
                    && self.value_uses[val] == 1
                    && self.value_lowered_uses[val] == 0
                    && self.num_outputs(src_inst) == 1
                    && self
                        .side_effect_inst_entry_colors
                        .get(&src_inst)
                        .unwrap()
                        .get()
                        + 1
                        == self.cur_scan_entry_color.unwrap().get()
                {
                    // Side-effecting, but the effect can be sunk to the
                    // current scan point: exactly one use, not yet lowered,
                    // a single output, and color-adjacent (no other side
                    // effect intervenes between producer and consumer).
                    Some((src_inst, 0))
                } else {
                    None
                }
}
_ => None,
};
let constant = inst.and_then(|(inst, _)| self.get_constant(inst));
NonRegInput { inst, constant }
}
}
impl<'func, I: VCodeInst> LowerCtx for Lower<'func, I> {
type I = I;
fn abi(&mut self) -> &mut dyn ABICallee<I = I> {
self.vcode.abi()
}
fn retval(&self, idx: usize) -> ValueRegs<Writable<Reg>> {
writable_value_regs(self.retval_regs[idx])
}
fn get_vm_context(&self) -> Option<Reg> {
self.vm_context
}
fn data(&self, ir_inst: Inst) -> &InstructionData {
&self.f.dfg[ir_inst]
}
fn ty(&self, ir_inst: Inst) -> Type {
self.f.dfg.ctrl_typevar(ir_inst)
}
fn call_target<'b>(&'b self, ir_inst: Inst) -> Option<(&'b ExternalName, RelocDistance)> {
match &self.f.dfg[ir_inst] {
&InstructionData::Call { func_ref, .. }
| &InstructionData::FuncAddr { func_ref, .. } => {
let funcdata = &self.f.dfg.ext_funcs[func_ref];
let dist = funcdata.reloc_distance();
Some((&funcdata.name, dist))
}
_ => None,
}
}
fn call_sig<'b>(&'b self, ir_inst: Inst) -> Option<&'b Signature> {
match &self.f.dfg[ir_inst] {
&InstructionData::Call { func_ref, .. } => {
let funcdata = &self.f.dfg.ext_funcs[func_ref];
Some(&self.f.dfg.signatures[funcdata.signature])
}
&InstructionData::CallIndirect { sig_ref, .. } => Some(&self.f.dfg.signatures[sig_ref]),
_ => None,
}
}
fn symbol_value<'b>(&'b self, ir_inst: Inst) -> Option<(&'b ExternalName, RelocDistance, i64)> {
match &self.f.dfg[ir_inst] {
&InstructionData::UnaryGlobalValue { global_value, .. } => {
let gvdata = &self.f.global_values[global_value];
match gvdata {
&GlobalValueData::Symbol {
ref name,
ref offset,
..
} => {
let offset = offset.bits();
let dist = gvdata.maybe_reloc_distance().unwrap();
Some((name, dist, offset))
}
_ => None,
}
}
_ => None,
}
}
fn memflags(&self, ir_inst: Inst) -> Option<MemFlags> {
match &self.f.dfg[ir_inst] {
&InstructionData::AtomicCas { flags, .. } => Some(flags),
&InstructionData::AtomicRmw { flags, .. } => Some(flags),
&InstructionData::Load { flags, .. }
| &InstructionData::LoadComplex { flags, .. }
| &InstructionData::LoadNoOffset { flags, .. }
| &InstructionData::Store { flags, .. }
| &InstructionData::StoreComplex { flags, .. } => Some(flags),
&InstructionData::StoreNoOffset { flags, .. } => Some(flags),
_ => None,
}
}
fn srcloc(&self, ir_inst: Inst) -> SourceLoc {
self.f.srclocs[ir_inst]
}
fn num_inputs(&self, ir_inst: Inst) -> usize {
self.f.dfg.inst_args(ir_inst).len()
}
fn num_outputs(&self, ir_inst: Inst) -> usize {
self.f.dfg.inst_results(ir_inst).len()
}
fn input_ty(&self, ir_inst: Inst, idx: usize) -> Type {
let val = self.f.dfg.inst_args(ir_inst)[idx];
let val = self.f.dfg.resolve_aliases(val);
self.f.dfg.value_type(val)
}
fn output_ty(&self, ir_inst: Inst, idx: usize) -> Type {
self.f.dfg.value_type(self.f.dfg.inst_results(ir_inst)[idx])
}
fn get_constant(&self, ir_inst: Inst) -> Option<u64> {
self.inst_constants.get(&ir_inst).cloned()
}
fn get_input_as_source_or_const(&self, ir_inst: Inst, idx: usize) -> NonRegInput {
let val = self.f.dfg.inst_args(ir_inst)[idx];
let val = self.f.dfg.resolve_aliases(val);
self.get_value_as_source_or_const(val)
}
fn put_input_in_regs(&mut self, ir_inst: Inst, idx: usize) -> ValueRegs<Reg> {
let val = self.f.dfg.inst_args(ir_inst)[idx];
let val = self.f.dfg.resolve_aliases(val);
self.put_value_in_regs(val)
}
fn get_output(&self, ir_inst: Inst, idx: usize) -> ValueRegs<Writable<Reg>> {
let val = self.f.dfg.inst_results(ir_inst)[idx];
writable_value_regs(self.value_regs[val])
}
fn alloc_tmp(&mut self, ty: Type) -> ValueRegs<Writable<Reg>> {
writable_value_regs(alloc_vregs(ty, &mut self.next_vreg, &mut self.vcode).unwrap())
}
fn emit(&mut self, mach_inst: I) {
self.ir_insts.push(InstTuple {
loc: SourceLoc::default(),
is_safepoint: false,
inst: mach_inst,
});
}
fn emit_safepoint(&mut self, mach_inst: I) {
self.ir_insts.push(InstTuple {
loc: SourceLoc::default(),
is_safepoint: true,
inst: mach_inst,
});
}
fn sink_inst(&mut self, ir_inst: Inst) {
assert!(has_lowering_side_effect(self.f, ir_inst));
assert!(self.cur_scan_entry_color.is_some());
let sunk_inst_entry_color = self
.side_effect_inst_entry_colors
.get(&ir_inst)
.cloned()
.unwrap();
let sunk_inst_exit_color = InstColor::new(sunk_inst_entry_color.get() + 1);
assert!(sunk_inst_exit_color == self.cur_scan_entry_color.unwrap());
self.cur_scan_entry_color = Some(sunk_inst_entry_color);
self.inst_sunk.insert(ir_inst);
}
fn get_constant_data(&self, constant_handle: Constant) -> &ConstantData {
self.f.dfg.constants.get(constant_handle)
}
fn use_constant(&mut self, constant: VCodeConstantData) -> VCodeConstant {
self.vcode.constants().insert(constant)
}
fn get_immediate(&self, ir_inst: Inst) -> Option<DataValue> {
let inst_data = self.data(ir_inst);
match inst_data {
InstructionData::Shuffle { mask, .. } => {
let buffer = self.f.dfg.immediates.get(mask.clone()).unwrap().as_slice();
let value = DataValue::V128(buffer.try_into().expect("a 16-byte data buffer"));
Some(value)
}
InstructionData::UnaryConst {
constant_handle, ..
} => {
let buffer = self.f.dfg.constants.get(constant_handle.clone()).as_slice();
let value = DataValue::V128(buffer.try_into().expect("a 16-byte data buffer"));
Some(value)
}
_ => inst_data.imm_value(),
}
}
fn ensure_in_vreg(&mut self, reg: Reg, ty: Type) -> Reg {
if reg.is_virtual() {
reg
} else {
let new_reg = self.alloc_tmp(ty).only_reg().unwrap();
self.emit(I::gen_move(new_reg, reg, ty));
new_reg.to_reg()
}
}
}
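/// Visit all successors of a block, invoking `visit` with the branch
/// instruction and each CFG-successor block it targets.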
pub(crate) fn visit_block_succs<F: FnMut(Inst, Block)>(f: &Function, block: Block, mut visit: F) {
for inst in f.layout.block_likely_branches(block) {
if f.dfg[inst].opcode().is_branch() {
visit_branch_targets(f, block, inst, &mut visit);
}
}
}
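/// Invoke `visit` for every target of a single branch instruction, handling
/// `fallthrough`, single-destination branches, and jump tables.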
fn visit_branch_targets<F: FnMut(Inst, Block)>(
f: &Function,
block: Block,
inst: Inst,
visit: &mut F,
) {
if f.dfg[inst].opcode() == Opcode::Fallthrough {
visit(inst, f.layout.next_block(block).unwrap());
} else {
match f.dfg[inst].analyze_branch(&f.dfg.value_lists) {
BranchInfo::NotABranch => {}
BranchInfo::SingleDest(dest, _) => {
visit(inst, dest);
}
BranchInfo::Table(table, maybe_dest) => {
if let Some(dest) = maybe_dest {
visit(inst, dest);
}
for &dest in f.jump_tables[table].as_slice() {
visit(inst, dest);
}
}
}
}
}