Trait wasmtime_jit::trampoline::ir::InstBuilder

pub trait InstBuilder<'f>: InstBuilderBase<'f> {
    pub fn jump(self, block: Block, args: &[Value]) -> Inst { ... }
pub fn fallthrough(self, block: Block, args: &[Value]) -> Inst { ... }
pub fn brz(self, c: Value, block: Block, args: &[Value]) -> Inst { ... }
pub fn brnz(self, c: Value, block: Block, args: &[Value]) -> Inst { ... }
pub fn br_icmp<T1>(
        self,
        Cond: T1,
        x: Value,
        y: Value,
        block: Block,
        args: &[Value]
    ) -> Inst
    where
        T1: Into<IntCC>
, { ... }
pub fn brif<T1>(
        self,
        Cond: T1,
        f: Value,
        block: Block,
        args: &[Value]
    ) -> Inst
    where
        T1: Into<IntCC>
, { ... }
pub fn brff<T1>(
        self,
        Cond: T1,
        f: Value,
        block: Block,
        args: &[Value]
    ) -> Inst
    where
        T1: Into<FloatCC>
, { ... }
pub fn br_table(self, x: Value, block: Block, JT: JumpTable) -> Inst { ... }
pub fn jump_table_entry<T1>(
        self,
        x: Value,
        addr: Value,
        Size: T1,
        JT: JumpTable
    ) -> Value
    where
        T1: Into<u8>
, { ... }
pub fn jump_table_base(self, iAddr: Type, JT: JumpTable) -> Value { ... }
pub fn indirect_jump_table_br(self, addr: Value, JT: JumpTable) -> Inst { ... }
pub fn debugtrap(self) -> Inst { ... }
pub fn trap<T1>(self, code: T1) -> Inst
    where
        T1: Into<TrapCode>
, { ... }
pub fn trapz<T1>(self, c: Value, code: T1) -> Inst
    where
        T1: Into<TrapCode>
, { ... }
pub fn resumable_trap<T1>(self, code: T1) -> Inst
    where
        T1: Into<TrapCode>
, { ... }
pub fn trapnz<T1>(self, c: Value, code: T1) -> Inst
    where
        T1: Into<TrapCode>
, { ... }
pub fn resumable_trapnz<T1>(self, c: Value, code: T1) -> Inst
    where
        T1: Into<TrapCode>
, { ... }
pub fn trapif<T1, T2>(self, Cond: T1, f: Value, code: T2) -> Inst
    where
        T1: Into<IntCC>,
        T2: Into<TrapCode>
, { ... }
pub fn trapff<T1, T2>(self, Cond: T1, f: Value, code: T2) -> Inst
    where
        T1: Into<FloatCC>,
        T2: Into<TrapCode>
, { ... }
pub fn return_(self, rvals: &[Value]) -> Inst { ... }
pub fn fallthrough_return(self, rvals: &[Value]) -> Inst { ... }
pub fn call(self, FN: FuncRef, args: &[Value]) -> Inst { ... }
pub fn call_indirect(
        self,
        SIG: SigRef,
        callee: Value,
        args: &[Value]
    ) -> Inst { ... }
pub fn func_addr(self, iAddr: Type, FN: FuncRef) -> Value { ... }
pub fn splat(self, TxN: Type, x: Value) -> Value { ... }
pub fn swizzle(self, TxN: Type, x: Value, y: Value) -> Value { ... }
pub fn insertlane<T1>(self, x: Value, y: Value, Idx: T1) -> Value
    where
        T1: Into<u8>
, { ... }
pub fn extractlane<T1>(self, x: Value, Idx: T1) -> Value
    where
        T1: Into<u8>
, { ... }
pub fn imin(self, x: Value, y: Value) -> Value { ... }
pub fn umin(self, x: Value, y: Value) -> Value { ... }
pub fn imax(self, x: Value, y: Value) -> Value { ... }
pub fn umax(self, x: Value, y: Value) -> Value { ... }
pub fn avg_round(self, x: Value, y: Value) -> Value { ... }
pub fn load<T1, T2>(
        self,
        Mem: Type,
        MemFlags: T1,
        p: Value,
        Offset: T2
    ) -> Value
    where
        T1: Into<MemFlags>,
        T2: Into<Offset32>
, { ... }
pub fn load_complex<T1, T2>(
        self,
        Mem: Type,
        MemFlags: T1,
        args: &[Value],
        Offset: T2
    ) -> Value
    where
        T1: Into<MemFlags>,
        T2: Into<Offset32>
, { ... }
pub fn store<T1, T2>(
        self,
        MemFlags: T1,
        x: Value,
        p: Value,
        Offset: T2
    ) -> Inst
    where
        T1: Into<MemFlags>,
        T2: Into<Offset32>
, { ... }
pub fn store_complex<T1, T2>(
        self,
        MemFlags: T1,
        x: Value,
        args: &[Value],
        Offset: T2
    ) -> Inst
    where
        T1: Into<MemFlags>,
        T2: Into<Offset32>
, { ... }
pub fn uload8<T1, T2>(
        self,
        iExt8: Type,
        MemFlags: T1,
        p: Value,
        Offset: T2
    ) -> Value
    where
        T1: Into<MemFlags>,
        T2: Into<Offset32>
, { ... }
pub fn uload8_complex<T1, T2>(
        self,
        iExt8: Type,
        MemFlags: T1,
        args: &[Value],
        Offset: T2
    ) -> Value
    where
        T1: Into<MemFlags>,
        T2: Into<Offset32>
, { ... }
pub fn sload8<T1, T2>(
        self,
        iExt8: Type,
        MemFlags: T1,
        p: Value,
        Offset: T2
    ) -> Value
    where
        T1: Into<MemFlags>,
        T2: Into<Offset32>
, { ... }
pub fn sload8_complex<T1, T2>(
        self,
        iExt8: Type,
        MemFlags: T1,
        args: &[Value],
        Offset: T2
    ) -> Value
    where
        T1: Into<MemFlags>,
        T2: Into<Offset32>
, { ... }
pub fn istore8<T1, T2>(
        self,
        MemFlags: T1,
        x: Value,
        p: Value,
        Offset: T2
    ) -> Inst
    where
        T1: Into<MemFlags>,
        T2: Into<Offset32>
, { ... }
pub fn istore8_complex<T1, T2>(
        self,
        MemFlags: T1,
        x: Value,
        args: &[Value],
        Offset: T2
    ) -> Inst
    where
        T1: Into<MemFlags>,
        T2: Into<Offset32>
, { ... }
pub fn uload16<T1, T2>(
        self,
        iExt16: Type,
        MemFlags: T1,
        p: Value,
        Offset: T2
    ) -> Value
    where
        T1: Into<MemFlags>,
        T2: Into<Offset32>
, { ... }
pub fn uload16_complex<T1, T2>(
        self,
        iExt16: Type,
        MemFlags: T1,
        args: &[Value],
        Offset: T2
    ) -> Value
    where
        T1: Into<MemFlags>,
        T2: Into<Offset32>
, { ... }
pub fn sload16<T1, T2>(
        self,
        iExt16: Type,
        MemFlags: T1,
        p: Value,
        Offset: T2
    ) -> Value
    where
        T1: Into<MemFlags>,
        T2: Into<Offset32>
, { ... }
pub fn sload16_complex<T1, T2>(
        self,
        iExt16: Type,
        MemFlags: T1,
        args: &[Value],
        Offset: T2
    ) -> Value
    where
        T1: Into<MemFlags>,
        T2: Into<Offset32>
, { ... }
pub fn istore16<T1, T2>(
        self,
        MemFlags: T1,
        x: Value,
        p: Value,
        Offset: T2
    ) -> Inst
    where
        T1: Into<MemFlags>,
        T2: Into<Offset32>
, { ... }
pub fn istore16_complex<T1, T2>(
        self,
        MemFlags: T1,
        x: Value,
        args: &[Value],
        Offset: T2
    ) -> Inst
    where
        T1: Into<MemFlags>,
        T2: Into<Offset32>
, { ... }
pub fn uload32<T1, T2>(self, MemFlags: T1, p: Value, Offset: T2) -> Value
    where
        T1: Into<MemFlags>,
        T2: Into<Offset32>
, { ... }
pub fn uload32_complex<T1, T2>(
        self,
        MemFlags: T1,
        args: &[Value],
        Offset: T2
    ) -> Value
    where
        T1: Into<MemFlags>,
        T2: Into<Offset32>
, { ... }
pub fn sload32<T1, T2>(self, MemFlags: T1, p: Value, Offset: T2) -> Value
    where
        T1: Into<MemFlags>,
        T2: Into<Offset32>
, { ... }
pub fn sload32_complex<T1, T2>(
        self,
        MemFlags: T1,
        args: &[Value],
        Offset: T2
    ) -> Value
    where
        T1: Into<MemFlags>,
        T2: Into<Offset32>
, { ... }
pub fn istore32<T1, T2>(
        self,
        MemFlags: T1,
        x: Value,
        p: Value,
        Offset: T2
    ) -> Inst
    where
        T1: Into<MemFlags>,
        T2: Into<Offset32>
, { ... }
pub fn istore32_complex<T1, T2>(
        self,
        MemFlags: T1,
        x: Value,
        args: &[Value],
        Offset: T2
    ) -> Inst
    where
        T1: Into<MemFlags>,
        T2: Into<Offset32>
, { ... }
pub fn uload8x8<T1, T2>(self, MemFlags: T1, p: Value, Offset: T2) -> Value
    where
        T1: Into<MemFlags>,
        T2: Into<Offset32>
, { ... }
pub fn uload8x8_complex<T1, T2>(
        self,
        MemFlags: T1,
        args: &[Value],
        Offset: T2
    ) -> Value
    where
        T1: Into<MemFlags>,
        T2: Into<Offset32>
, { ... }
pub fn sload8x8<T1, T2>(self, MemFlags: T1, p: Value, Offset: T2) -> Value
    where
        T1: Into<MemFlags>,
        T2: Into<Offset32>
, { ... }
pub fn sload8x8_complex<T1, T2>(
        self,
        MemFlags: T1,
        args: &[Value],
        Offset: T2
    ) -> Value
    where
        T1: Into<MemFlags>,
        T2: Into<Offset32>
, { ... }
pub fn uload16x4<T1, T2>(self, MemFlags: T1, p: Value, Offset: T2) -> Value
    where
        T1: Into<MemFlags>,
        T2: Into<Offset32>
, { ... }
pub fn uload16x4_complex<T1, T2>(
        self,
        MemFlags: T1,
        args: &[Value],
        Offset: T2
    ) -> Value
    where
        T1: Into<MemFlags>,
        T2: Into<Offset32>
, { ... }
pub fn sload16x4<T1, T2>(self, MemFlags: T1, p: Value, Offset: T2) -> Value
    where
        T1: Into<MemFlags>,
        T2: Into<Offset32>
, { ... }
pub fn sload16x4_complex<T1, T2>(
        self,
        MemFlags: T1,
        args: &[Value],
        Offset: T2
    ) -> Value
    where
        T1: Into<MemFlags>,
        T2: Into<Offset32>
, { ... }
pub fn uload32x2<T1, T2>(self, MemFlags: T1, p: Value, Offset: T2) -> Value
    where
        T1: Into<MemFlags>,
        T2: Into<Offset32>
, { ... }
pub fn uload32x2_complex<T1, T2>(
        self,
        MemFlags: T1,
        args: &[Value],
        Offset: T2
    ) -> Value
    where
        T1: Into<MemFlags>,
        T2: Into<Offset32>
, { ... }
pub fn sload32x2<T1, T2>(self, MemFlags: T1, p: Value, Offset: T2) -> Value
    where
        T1: Into<MemFlags>,
        T2: Into<Offset32>
, { ... }
pub fn sload32x2_complex<T1, T2>(
        self,
        MemFlags: T1,
        args: &[Value],
        Offset: T2
    ) -> Value
    where
        T1: Into<MemFlags>,
        T2: Into<Offset32>
, { ... }
pub fn stack_load<T1>(self, Mem: Type, SS: StackSlot, Offset: T1) -> Value
    where
        T1: Into<Offset32>
, { ... }
pub fn stack_store<T1>(self, x: Value, SS: StackSlot, Offset: T1) -> Inst
    where
        T1: Into<Offset32>
, { ... }
pub fn stack_addr<T1>(self, iAddr: Type, SS: StackSlot, Offset: T1) -> Value
    where
        T1: Into<Offset32>
, { ... }
pub fn global_value(self, Mem: Type, GV: GlobalValue) -> Value { ... }
pub fn symbol_value(self, Mem: Type, GV: GlobalValue) -> Value { ... }
pub fn tls_value(self, Mem: Type, GV: GlobalValue) -> Value { ... }
pub fn heap_addr<T1>(
        self,
        iAddr: Type,
        H: Heap,
        p: Value,
        Size: T1
    ) -> Value
    where
        T1: Into<Uimm32>
, { ... }
pub fn get_pinned_reg(self, iAddr: Type) -> Value { ... }
pub fn set_pinned_reg(self, addr: Value) -> Inst { ... }
pub fn table_addr<T1>(
        self,
        iAddr: Type,
        T: Table,
        p: Value,
        Offset: T1
    ) -> Value
    where
        T1: Into<Offset32>
, { ... }
pub fn iconst<T1>(self, Int: Type, N: T1) -> Value
    where
        T1: Into<Imm64>
, { ... }
pub fn f32const<T1>(self, N: T1) -> Value
    where
        T1: Into<Ieee32>
, { ... }
pub fn f64const<T1>(self, N: T1) -> Value
    where
        T1: Into<Ieee64>
, { ... }
pub fn bconst<T1>(self, Bool: Type, N: T1) -> Value
    where
        T1: Into<bool>
, { ... }
pub fn vconst<T1>(self, TxN: Type, N: T1) -> Value
    where
        T1: Into<Constant>
, { ... }
pub fn const_addr<T1>(self, iAddr: Type, constant: T1) -> Value
    where
        T1: Into<Constant>
, { ... }
pub fn shuffle<T1>(self, a: Value, b: Value, mask: T1) -> Value
    where
        T1: Into<Immediate>
, { ... }
pub fn null(self, Ref: Type) -> Value { ... }
pub fn nop(self) -> Inst { ... }
pub fn select(self, c: Value, x: Value, y: Value) -> Value { ... }
pub fn selectif<T1>(
        self,
        Any: Type,
        cc: T1,
        flags: Value,
        x: Value,
        y: Value
    ) -> Value
    where
        T1: Into<IntCC>
, { ... }
pub fn selectif_spectre_guard<T1>(
        self,
        Any: Type,
        cc: T1,
        flags: Value,
        x: Value,
        y: Value
    ) -> Value
    where
        T1: Into<IntCC>
, { ... }
pub fn bitselect(self, c: Value, x: Value, y: Value) -> Value { ... }
pub fn copy(self, x: Value) -> Value { ... }
pub fn spill(self, x: Value) -> Value { ... }
pub fn fill(self, x: Value) -> Value { ... }
pub fn fill_nop(self, x: Value) -> Value { ... }
pub fn dummy_sarg_t(self) -> Value { ... }
pub fn regmove<T1, T2>(self, x: Value, src: T1, dst: T2) -> Inst
    where
        T1: Into<u16>,
        T2: Into<u16>
, { ... }
pub fn copy_special<T1, T2>(self, src: T1, dst: T2) -> Inst
    where
        T1: Into<u16>,
        T2: Into<u16>
, { ... }
pub fn copy_to_ssa<T1>(self, Any: Type, src: T1) -> Value
    where
        T1: Into<u16>
, { ... }
pub fn copy_nop(self, x: Value) -> Value { ... }
pub fn adjust_sp_down(self, delta: Value) -> Inst { ... }
pub fn adjust_sp_up_imm<T1>(self, Offset: T1) -> Inst
    where
        T1: Into<Imm64>
, { ... }
pub fn adjust_sp_down_imm<T1>(self, Offset: T1) -> Inst
    where
        T1: Into<Imm64>
, { ... }
pub fn ifcmp_sp(self, addr: Value) -> Value { ... }
pub fn regspill<T1>(self, x: Value, src: T1, SS: StackSlot) -> Inst
    where
        T1: Into<u16>
, { ... }
pub fn regfill<T1>(self, x: Value, SS: StackSlot, dst: T1) -> Inst
    where
        T1: Into<u16>
, { ... }
pub fn safepoint(self, args: &[Value]) -> Inst { ... }
pub fn vsplit(self, x: Value) -> (Value, Value) { ... }
pub fn vconcat(self, x: Value, y: Value) -> Value { ... }
pub fn vselect(self, c: Value, x: Value, y: Value) -> Value { ... }
pub fn vany_true(self, a: Value) -> Value { ... }
pub fn vall_true(self, a: Value) -> Value { ... }
pub fn vhigh_bits(self, Int: Type, a: Value) -> Value { ... }
pub fn icmp<T1>(self, Cond: T1, x: Value, y: Value) -> Value
    where
        T1: Into<IntCC>
, { ... }
pub fn icmp_imm<T1, T2>(self, Cond: T1, x: Value, Y: T2) -> Value
    where
        T1: Into<IntCC>,
        T2: Into<Imm64>
, { ... }
pub fn ifcmp(self, x: Value, y: Value) -> Value { ... }
pub fn ifcmp_imm<T1>(self, x: Value, Y: T1) -> Value
    where
        T1: Into<Imm64>
, { ... }
pub fn iadd(self, x: Value, y: Value) -> Value { ... }
pub fn uadd_sat(self, x: Value, y: Value) -> Value { ... }
pub fn sadd_sat(self, x: Value, y: Value) -> Value { ... }
pub fn isub(self, x: Value, y: Value) -> Value { ... }
pub fn usub_sat(self, x: Value, y: Value) -> Value { ... }
pub fn ssub_sat(self, x: Value, y: Value) -> Value { ... }
pub fn ineg(self, x: Value) -> Value { ... }
pub fn iabs(self, x: Value) -> Value { ... }
pub fn imul(self, x: Value, y: Value) -> Value { ... }
pub fn umulhi(self, x: Value, y: Value) -> Value { ... }
pub fn smulhi(self, x: Value, y: Value) -> Value { ... }
pub fn udiv(self, x: Value, y: Value) -> Value { ... }
pub fn sdiv(self, x: Value, y: Value) -> Value { ... }
pub fn urem(self, x: Value, y: Value) -> Value { ... }
pub fn srem(self, x: Value, y: Value) -> Value { ... }
pub fn iadd_imm<T1>(self, x: Value, Y: T1) -> Value
    where
        T1: Into<Imm64>
, { ... }
pub fn imul_imm<T1>(self, x: Value, Y: T1) -> Value
    where
        T1: Into<Imm64>
, { ... }
pub fn udiv_imm<T1>(self, x: Value, Y: T1) -> Value
    where
        T1: Into<Imm64>
, { ... }
pub fn sdiv_imm<T1>(self, x: Value, Y: T1) -> Value
    where
        T1: Into<Imm64>
, { ... }
pub fn urem_imm<T1>(self, x: Value, Y: T1) -> Value
    where
        T1: Into<Imm64>
, { ... }
pub fn srem_imm<T1>(self, x: Value, Y: T1) -> Value
    where
        T1: Into<Imm64>
, { ... }
pub fn irsub_imm<T1>(self, x: Value, Y: T1) -> Value
    where
        T1: Into<Imm64>
, { ... }
pub fn iadd_cin(self, x: Value, y: Value, c_in: Value) -> Value { ... }
pub fn iadd_ifcin(self, x: Value, y: Value, c_in: Value) -> Value { ... }
pub fn iadd_cout(self, x: Value, y: Value) -> (Value, Value) { ... }
pub fn iadd_ifcout(self, x: Value, y: Value) -> (Value, Value) { ... }
pub fn iadd_carry(self, x: Value, y: Value, c_in: Value) -> (Value, Value) { ... }
pub fn iadd_ifcarry(self, x: Value, y: Value, c_in: Value) -> (Value, Value) { ... }
pub fn isub_bin(self, x: Value, y: Value, b_in: Value) -> Value { ... }
pub fn isub_ifbin(self, x: Value, y: Value, b_in: Value) -> Value { ... }
pub fn isub_bout(self, x: Value, y: Value) -> (Value, Value) { ... }
pub fn isub_ifbout(self, x: Value, y: Value) -> (Value, Value) { ... }
pub fn isub_borrow(self, x: Value, y: Value, b_in: Value) -> (Value, Value) { ... }
pub fn isub_ifborrow(
        self,
        x: Value,
        y: Value,
        b_in: Value
    ) -> (Value, Value) { ... }
pub fn band(self, x: Value, y: Value) -> Value { ... }
pub fn bor(self, x: Value, y: Value) -> Value { ... }
pub fn bxor(self, x: Value, y: Value) -> Value { ... }
pub fn bnot(self, x: Value) -> Value { ... }
pub fn band_not(self, x: Value, y: Value) -> Value { ... }
pub fn bor_not(self, x: Value, y: Value) -> Value { ... }
pub fn bxor_not(self, x: Value, y: Value) -> Value { ... }
pub fn band_imm<T1>(self, x: Value, Y: T1) -> Value
    where
        T1: Into<Imm64>
, { ... }
pub fn bor_imm<T1>(self, x: Value, Y: T1) -> Value
    where
        T1: Into<Imm64>
, { ... }
pub fn bxor_imm<T1>(self, x: Value, Y: T1) -> Value
    where
        T1: Into<Imm64>
, { ... }
pub fn rotl(self, x: Value, y: Value) -> Value { ... }
pub fn rotr(self, x: Value, y: Value) -> Value { ... }
pub fn rotl_imm<T1>(self, x: Value, Y: T1) -> Value
    where
        T1: Into<Imm64>
, { ... }
pub fn rotr_imm<T1>(self, x: Value, Y: T1) -> Value
    where
        T1: Into<Imm64>
, { ... }
pub fn ishl(self, x: Value, y: Value) -> Value { ... }
pub fn ushr(self, x: Value, y: Value) -> Value { ... }
pub fn sshr(self, x: Value, y: Value) -> Value { ... }
pub fn ishl_imm<T1>(self, x: Value, Y: T1) -> Value
    where
        T1: Into<Imm64>
, { ... }
pub fn ushr_imm<T1>(self, x: Value, Y: T1) -> Value
    where
        T1: Into<Imm64>
, { ... }
pub fn sshr_imm<T1>(self, x: Value, Y: T1) -> Value
    where
        T1: Into<Imm64>
, { ... }
pub fn bitrev(self, x: Value) -> Value { ... }
pub fn clz(self, x: Value) -> Value { ... }
pub fn cls(self, x: Value) -> Value { ... }
pub fn ctz(self, x: Value) -> Value { ... }
pub fn popcnt(self, x: Value) -> Value { ... }
pub fn fcmp<T1>(self, Cond: T1, x: Value, y: Value) -> Value
    where
        T1: Into<FloatCC>
, { ... }
pub fn ffcmp(self, x: Value, y: Value) -> Value { ... }
pub fn fadd(self, x: Value, y: Value) -> Value { ... }
pub fn fsub(self, x: Value, y: Value) -> Value { ... }
pub fn fmul(self, x: Value, y: Value) -> Value { ... }
pub fn fdiv(self, x: Value, y: Value) -> Value { ... }
pub fn sqrt(self, x: Value) -> Value { ... }
pub fn fma(self, x: Value, y: Value, z: Value) -> Value { ... }
pub fn fneg(self, x: Value) -> Value { ... }
pub fn fabs(self, x: Value) -> Value { ... }
pub fn fcopysign(self, x: Value, y: Value) -> Value { ... }
pub fn fmin(self, x: Value, y: Value) -> Value { ... }
pub fn fmin_pseudo(self, x: Value, y: Value) -> Value { ... }
pub fn fmax(self, x: Value, y: Value) -> Value { ... }
pub fn fmax_pseudo(self, x: Value, y: Value) -> Value { ... }
pub fn ceil(self, x: Value) -> Value { ... }
pub fn floor(self, x: Value) -> Value { ... }
pub fn trunc(self, x: Value) -> Value { ... }
pub fn nearest(self, x: Value) -> Value { ... }
pub fn is_null(self, x: Value) -> Value { ... }
pub fn is_invalid(self, x: Value) -> Value { ... }
pub fn trueif<T1>(self, Cond: T1, f: Value) -> Value
    where
        T1: Into<IntCC>
, { ... }
pub fn trueff<T1>(self, Cond: T1, f: Value) -> Value
    where
        T1: Into<FloatCC>
, { ... }
pub fn bitcast(self, MemTo: Type, x: Value) -> Value { ... }
pub fn raw_bitcast(self, AnyTo: Type, x: Value) -> Value { ... }
pub fn scalar_to_vector(self, TxN: Type, s: Value) -> Value { ... }
pub fn breduce(self, BoolTo: Type, x: Value) -> Value { ... }
pub fn bextend(self, BoolTo: Type, x: Value) -> Value { ... }
pub fn bint(self, IntTo: Type, x: Value) -> Value { ... }
pub fn bmask(self, IntTo: Type, x: Value) -> Value { ... }
pub fn ireduce(self, IntTo: Type, x: Value) -> Value { ... }
pub fn snarrow(self, x: Value, y: Value) -> Value { ... }
pub fn unarrow(self, x: Value, y: Value) -> Value { ... }
pub fn swiden_low(self, x: Value) -> Value { ... }
pub fn swiden_high(self, x: Value) -> Value { ... }
pub fn uwiden_low(self, x: Value) -> Value { ... }
pub fn uwiden_high(self, x: Value) -> Value { ... }
pub fn widening_pairwise_dot_product_s(self, x: Value, y: Value) -> Value { ... }
pub fn uextend(self, IntTo: Type, x: Value) -> Value { ... }
pub fn sextend(self, IntTo: Type, x: Value) -> Value { ... }
pub fn fpromote(self, FloatTo: Type, x: Value) -> Value { ... }
pub fn fdemote(self, FloatTo: Type, x: Value) -> Value { ... }
pub fn fcvt_to_uint(self, IntTo: Type, x: Value) -> Value { ... }
pub fn fcvt_to_uint_sat(self, IntTo: Type, x: Value) -> Value { ... }
pub fn fcvt_to_sint(self, IntTo: Type, x: Value) -> Value { ... }
pub fn fcvt_to_sint_sat(self, IntTo: Type, x: Value) -> Value { ... }
pub fn fcvt_from_uint(self, FloatTo: Type, x: Value) -> Value { ... }
pub fn fcvt_from_sint(self, FloatTo: Type, x: Value) -> Value { ... }
pub fn isplit(self, x: Value) -> (Value, Value) { ... }
pub fn iconcat(self, lo: Value, hi: Value) -> Value { ... }
pub fn atomic_rmw<T1, T2>(
        self,
        AtomicMem: Type,
        MemFlags: T1,
        AtomicRmwOp: T2,
        p: Value,
        x: Value
    ) -> Value
    where
        T1: Into<MemFlags>,
        T2: Into<AtomicRmwOp>
, { ... }
pub fn atomic_cas<T1>(
        self,
        MemFlags: T1,
        p: Value,
        e: Value,
        x: Value
    ) -> Value
    where
        T1: Into<MemFlags>
, { ... }
pub fn atomic_load<T1>(
        self,
        AtomicMem: Type,
        MemFlags: T1,
        p: Value
    ) -> Value
    where
        T1: Into<MemFlags>
, { ... }
pub fn atomic_store<T1>(self, MemFlags: T1, x: Value, p: Value) -> Inst
    where
        T1: Into<MemFlags>
, { ... }
pub fn fence(self) -> Inst { ... }
pub fn x86_udivmodx(
        self,
        nlo: Value,
        nhi: Value,
        d: Value
    ) -> (Value, Value) { ... }
pub fn x86_sdivmodx(
        self,
        nlo: Value,
        nhi: Value,
        d: Value
    ) -> (Value, Value) { ... }
pub fn x86_umulx(self, argL: Value, argR: Value) -> (Value, Value) { ... }
pub fn x86_smulx(self, argL: Value, argR: Value) -> (Value, Value) { ... }
pub fn x86_cvtt2si(self, IntTo: Type, x: Value) -> Value { ... }
pub fn x86_vcvtudq2ps(self, x: Value) -> Value { ... }
pub fn x86_fmin(self, x: Value, y: Value) -> Value { ... }
pub fn x86_fmax(self, x: Value, y: Value) -> Value { ... }
pub fn x86_push(self, x: Value) -> Inst { ... }
pub fn x86_pop(self, iWord: Type) -> Value { ... }
pub fn x86_bsr(self, x: Value) -> (Value, Value) { ... }
pub fn x86_bsf(self, x: Value) -> (Value, Value) { ... }
pub fn x86_pshufd<T1>(self, a: Value, i: T1) -> Value
    where
        T1: Into<u8>
, { ... }
pub fn x86_pshufb(self, a: Value, b: Value) -> Value { ... }
pub fn x86_pblendw<T1>(self, a: Value, b: Value, mask: T1) -> Value
    where
        T1: Into<u8>
, { ... }
pub fn x86_pextr<T1>(self, x: Value, Idx: T1) -> Value
    where
        T1: Into<u8>
, { ... }
pub fn x86_pinsr<T1>(self, x: Value, y: Value, Idx: T1) -> Value
    where
        T1: Into<u8>
, { ... }
pub fn x86_insertps<T1>(self, x: Value, y: Value, Idx: T1) -> Value
    where
        T1: Into<u8>
, { ... }
pub fn x86_punpckh(self, x: Value, y: Value) -> Value { ... }
pub fn x86_punpckl(self, x: Value, y: Value) -> Value { ... }
pub fn x86_movsd(self, x: Value, y: Value) -> Value { ... }
pub fn x86_movlhps(self, x: Value, y: Value) -> Value { ... }
pub fn x86_psll(self, x: Value, y: Value) -> Value { ... }
pub fn x86_psrl(self, x: Value, y: Value) -> Value { ... }
pub fn x86_psra(self, x: Value, y: Value) -> Value { ... }
pub fn x86_pmullq(self, x: Value, y: Value) -> Value { ... }
pub fn x86_pmuludq(self, x: Value, y: Value) -> Value { ... }
pub fn x86_ptest(self, x: Value, y: Value) -> Value { ... }
pub fn x86_pmaxs(self, x: Value, y: Value) -> Value { ... }
pub fn x86_pmaxu(self, x: Value, y: Value) -> Value { ... }
pub fn x86_pmins(self, x: Value, y: Value) -> Value { ... }
pub fn x86_pminu(self, x: Value, y: Value) -> Value { ... }
pub fn x86_palignr<T1>(self, x: Value, y: Value, c: T1) -> Value
    where
        T1: Into<u8>
, { ... }
pub fn x86_elf_tls_get_addr(self, GV: GlobalValue) -> Value { ... }
pub fn x86_macho_tls_get_addr(self, GV: GlobalValue) -> Value { ... }
pub fn AtomicCas(
        self,
        opcode: Opcode,
        ctrl_typevar: Type,
        flags: MemFlags,
        arg0: Value,
        arg1: Value,
        arg2: Value
    ) -> (Inst, &'f mut DataFlowGraph) { ... }
pub fn AtomicRmw(
        self,
        opcode: Opcode,
        ctrl_typevar: Type,
        flags: MemFlags,
        op: AtomicRmwOp,
        arg0: Value,
        arg1: Value
    ) -> (Inst, &'f mut DataFlowGraph) { ... }
pub fn Binary(
        self,
        opcode: Opcode,
        ctrl_typevar: Type,
        arg0: Value,
        arg1: Value
    ) -> (Inst, &'f mut DataFlowGraph) { ... }
pub fn BinaryImm64(
        self,
        opcode: Opcode,
        ctrl_typevar: Type,
        imm: Imm64,
        arg0: Value
    ) -> (Inst, &'f mut DataFlowGraph) { ... }
pub fn BinaryImm8(
        self,
        opcode: Opcode,
        ctrl_typevar: Type,
        imm: u8,
        arg0: Value
    ) -> (Inst, &'f mut DataFlowGraph) { ... }
pub fn Branch(
        self,
        opcode: Opcode,
        ctrl_typevar: Type,
        destination: Block,
        args: EntityList<Value>
    ) -> (Inst, &'f mut DataFlowGraph) { ... }
pub fn BranchFloat(
        self,
        opcode: Opcode,
        ctrl_typevar: Type,
        cond: FloatCC,
        destination: Block,
        args: EntityList<Value>
    ) -> (Inst, &'f mut DataFlowGraph) { ... }
pub fn BranchIcmp(
        self,
        opcode: Opcode,
        ctrl_typevar: Type,
        cond: IntCC,
        destination: Block,
        args: EntityList<Value>
    ) -> (Inst, &'f mut DataFlowGraph) { ... }
pub fn BranchInt(
        self,
        opcode: Opcode,
        ctrl_typevar: Type,
        cond: IntCC,
        destination: Block,
        args: EntityList<Value>
    ) -> (Inst, &'f mut DataFlowGraph) { ... }
pub fn BranchTable(
        self,
        opcode: Opcode,
        ctrl_typevar: Type,
        destination: Block,
        table: JumpTable,
        arg0: Value
    ) -> (Inst, &'f mut DataFlowGraph) { ... }
pub fn BranchTableBase(
        self,
        opcode: Opcode,
        ctrl_typevar: Type,
        table: JumpTable
    ) -> (Inst, &'f mut DataFlowGraph) { ... }
pub fn BranchTableEntry(
        self,
        opcode: Opcode,
        ctrl_typevar: Type,
        imm: u8,
        table: JumpTable,
        arg0: Value,
        arg1: Value
    ) -> (Inst, &'f mut DataFlowGraph) { ... }
pub fn Call(
        self,
        opcode: Opcode,
        ctrl_typevar: Type,
        func_ref: FuncRef,
        args: EntityList<Value>
    ) -> (Inst, &'f mut DataFlowGraph) { ... }
pub fn CallIndirect(
        self,
        opcode: Opcode,
        ctrl_typevar: Type,
        sig_ref: SigRef,
        args: EntityList<Value>
    ) -> (Inst, &'f mut DataFlowGraph) { ... }
pub fn CondTrap(
        self,
        opcode: Opcode,
        ctrl_typevar: Type,
        code: TrapCode,
        arg0: Value
    ) -> (Inst, &'f mut DataFlowGraph) { ... }
pub fn CopySpecial(
        self,
        opcode: Opcode,
        ctrl_typevar: Type,
        src: u16,
        dst: u16
    ) -> (Inst, &'f mut DataFlowGraph) { ... }
pub fn CopyToSsa(
        self,
        opcode: Opcode,
        ctrl_typevar: Type,
        src: u16
    ) -> (Inst, &'f mut DataFlowGraph) { ... }
pub fn FloatCompare(
        self,
        opcode: Opcode,
        ctrl_typevar: Type,
        cond: FloatCC,
        arg0: Value,
        arg1: Value
    ) -> (Inst, &'f mut DataFlowGraph) { ... }
pub fn FloatCond(
        self,
        opcode: Opcode,
        ctrl_typevar: Type,
        cond: FloatCC,
        arg0: Value
    ) -> (Inst, &'f mut DataFlowGraph) { ... }
pub fn FloatCondTrap(
        self,
        opcode: Opcode,
        ctrl_typevar: Type,
        cond: FloatCC,
        code: TrapCode,
        arg0: Value
    ) -> (Inst, &'f mut DataFlowGraph) { ... }
pub fn FuncAddr(
        self,
        opcode: Opcode,
        ctrl_typevar: Type,
        func_ref: FuncRef
    ) -> (Inst, &'f mut DataFlowGraph) { ... }
pub fn HeapAddr(
        self,
        opcode: Opcode,
        ctrl_typevar: Type,
        heap: Heap,
        imm: Uimm32,
        arg0: Value
    ) -> (Inst, &'f mut DataFlowGraph) { ... }
pub fn IndirectJump(
        self,
        opcode: Opcode,
        ctrl_typevar: Type,
        table: JumpTable,
        arg0: Value
    ) -> (Inst, &'f mut DataFlowGraph) { ... }
pub fn IntCompare(
        self,
        opcode: Opcode,
        ctrl_typevar: Type,
        cond: IntCC,
        arg0: Value,
        arg1: Value
    ) -> (Inst, &'f mut DataFlowGraph) { ... }
pub fn IntCompareImm(
        self,
        opcode: Opcode,
        ctrl_typevar: Type,
        cond: IntCC,
        imm: Imm64,
        arg0: Value
    ) -> (Inst, &'f mut DataFlowGraph) { ... }
pub fn IntCond(
        self,
        opcode: Opcode,
        ctrl_typevar: Type,
        cond: IntCC,
        arg0: Value
    ) -> (Inst, &'f mut DataFlowGraph) { ... }
pub fn IntCondTrap(
        self,
        opcode: Opcode,
        ctrl_typevar: Type,
        cond: IntCC,
        code: TrapCode,
        arg0: Value
    ) -> (Inst, &'f mut DataFlowGraph) { ... }
pub fn IntSelect(
        self,
        opcode: Opcode,
        ctrl_typevar: Type,
        cond: IntCC,
        arg0: Value,
        arg1: Value,
        arg2: Value
    ) -> (Inst, &'f mut DataFlowGraph) { ... }
pub fn Jump(
        self,
        opcode: Opcode,
        ctrl_typevar: Type,
        destination: Block,
        args: EntityList<Value>
    ) -> (Inst, &'f mut DataFlowGraph) { ... }
pub fn Load(
        self,
        opcode: Opcode,
        ctrl_typevar: Type,
        flags: MemFlags,
        offset: Offset32,
        arg0: Value
    ) -> (Inst, &'f mut DataFlowGraph) { ... }
pub fn LoadComplex(
        self,
        opcode: Opcode,
        ctrl_typevar: Type,
        flags: MemFlags,
        offset: Offset32,
        args: EntityList<Value>
    ) -> (Inst, &'f mut DataFlowGraph) { ... }
pub fn LoadNoOffset(
        self,
        opcode: Opcode,
        ctrl_typevar: Type,
        flags: MemFlags,
        arg0: Value
    ) -> (Inst, &'f mut DataFlowGraph) { ... }
pub fn MultiAry(
        self,
        opcode: Opcode,
        ctrl_typevar: Type,
        args: EntityList<Value>
    ) -> (Inst, &'f mut DataFlowGraph) { ... }
pub fn NullAry(
        self,
        opcode: Opcode,
        ctrl_typevar: Type
    ) -> (Inst, &'f mut DataFlowGraph) { ... }
pub fn RegFill(
        self,
        opcode: Opcode,
        ctrl_typevar: Type,
        src: StackSlot,
        dst: u16,
        arg0: Value
    ) -> (Inst, &'f mut DataFlowGraph) { ... }
pub fn RegMove(
        self,
        opcode: Opcode,
        ctrl_typevar: Type,
        src: u16,
        dst: u16,
        arg0: Value
    ) -> (Inst, &'f mut DataFlowGraph) { ... }
pub fn RegSpill(
        self,
        opcode: Opcode,
        ctrl_typevar: Type,
        src: u16,
        dst: StackSlot,
        arg0: Value
    ) -> (Inst, &'f mut DataFlowGraph) { ... }
pub fn Shuffle(
        self,
        opcode: Opcode,
        ctrl_typevar: Type,
        mask: Immediate,
        arg0: Value,
        arg1: Value
    ) -> (Inst, &'f mut DataFlowGraph) { ... }
pub fn StackLoad(
        self,
        opcode: Opcode,
        ctrl_typevar: Type,
        stack_slot: StackSlot,
        offset: Offset32
    ) -> (Inst, &'f mut DataFlowGraph) { ... }
pub fn StackStore(
        self,
        opcode: Opcode,
        ctrl_typevar: Type,
        stack_slot: StackSlot,
        offset: Offset32,
        arg0: Value
    ) -> (Inst, &'f mut DataFlowGraph) { ... }
pub fn Store(
        self,
        opcode: Opcode,
        ctrl_typevar: Type,
        flags: MemFlags,
        offset: Offset32,
        arg0: Value,
        arg1: Value
    ) -> (Inst, &'f mut DataFlowGraph) { ... }
pub fn StoreComplex(
        self,
        opcode: Opcode,
        ctrl_typevar: Type,
        flags: MemFlags,
        offset: Offset32,
        args: EntityList<Value>
    ) -> (Inst, &'f mut DataFlowGraph) { ... }
pub fn StoreNoOffset(
        self,
        opcode: Opcode,
        ctrl_typevar: Type,
        flags: MemFlags,
        arg0: Value,
        arg1: Value
    ) -> (Inst, &'f mut DataFlowGraph) { ... }
pub fn TableAddr(
        self,
        opcode: Opcode,
        ctrl_typevar: Type,
        table: Table,
        offset: Offset32,
        arg0: Value
    ) -> (Inst, &'f mut DataFlowGraph) { ... }
pub fn Ternary(
        self,
        opcode: Opcode,
        ctrl_typevar: Type,
        arg0: Value,
        arg1: Value,
        arg2: Value
    ) -> (Inst, &'f mut DataFlowGraph) { ... }
pub fn TernaryImm8(
        self,
        opcode: Opcode,
        ctrl_typevar: Type,
        imm: u8,
        arg0: Value,
        arg1: Value
    ) -> (Inst, &'f mut DataFlowGraph) { ... }
pub fn Trap(
        self,
        opcode: Opcode,
        ctrl_typevar: Type,
        code: TrapCode
    ) -> (Inst, &'f mut DataFlowGraph) { ... }
pub fn Unary(
        self,
        opcode: Opcode,
        ctrl_typevar: Type,
        arg0: Value
    ) -> (Inst, &'f mut DataFlowGraph) { ... }
pub fn UnaryBool(
        self,
        opcode: Opcode,
        ctrl_typevar: Type,
        imm: bool
    ) -> (Inst, &'f mut DataFlowGraph) { ... }
pub fn UnaryConst(
        self,
        opcode: Opcode,
        ctrl_typevar: Type,
        constant_handle: Constant
    ) -> (Inst, &'f mut DataFlowGraph) { ... }
pub fn UnaryGlobalValue(
        self,
        opcode: Opcode,
        ctrl_typevar: Type,
        global_value: GlobalValue
    ) -> (Inst, &'f mut DataFlowGraph) { ... }
pub fn UnaryIeee32(
        self,
        opcode: Opcode,
        ctrl_typevar: Type,
        imm: Ieee32
    ) -> (Inst, &'f mut DataFlowGraph) { ... }
pub fn UnaryIeee64(
        self,
        opcode: Opcode,
        ctrl_typevar: Type,
        imm: Ieee64
    ) -> (Inst, &'f mut DataFlowGraph) { ... }
pub fn UnaryImm(
        self,
        opcode: Opcode,
        ctrl_typevar: Type,
        imm: Imm64
    ) -> (Inst, &'f mut DataFlowGraph) { ... } }

Convenience methods for building instructions.

The InstBuilder trait has one method per instruction opcode for conveniently constructing the instruction with minimum arguments. Polymorphic instructions infer their result types from the input arguments when possible. In some cases, an explicit ctrl_typevar argument is required.

The opcode methods return the new instruction’s result values, or the Inst itself for instructions that don’t have any results.

There is also a method per instruction format. As the signatures above show, these methods return the new Inst together with a mutable reference to the DataFlowGraph.
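
For orientation, here is a hedged sketch (not taken from this page) of how the builder is usually driven: a cranelift_frontend::FunctionBuilder, whose ins() method hands back an InstBuilder, with each opcode method appending one instruction.

    use cranelift_codegen::ir::{types, InstBuilder, Value};
    use cranelift_frontend::FunctionBuilder;

    // Hypothetical helper: emit `41 + 1` and return the result value.
    fn emit_forty_two(builder: &mut FunctionBuilder<'_>) -> Value {
        // Opcode methods such as `iconst` and `iadd` return the new
        // instruction's result Value directly.
        let x = builder.ins().iconst(types::I32, 41);
        let one = builder.ins().iconst(types::I32, 1);
        builder.ins().iadd(x, one)
    }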

Provided methods

pub fn jump(self, block: Block, args: &[Value]) -> Inst[src]

Jump.

Unconditionally jump to a basic block, passing the specified block arguments. The number and types of arguments must match the destination block.

Inputs:

  • block: Destination basic block
  • args: block arguments

pub fn fallthrough(self, block: Block, args: &[Value]) -> Inst[src]

Fall through to the next block.

This is the same as jump, except the destination block must be the next one in the layout.

Jumps are turned into fall-through instructions by the branch relaxation pass. There is no reason to use this instruction outside that pass.

Inputs:

  • block: Destination basic block
  • args: block arguments

pub fn brz(self, c: Value, block: Block, args: &[Value]) -> Inst[src]

Branch when zero.

If c is a b1 value, take the branch when c is false. If c is an integer value, take the branch when c = 0.

Inputs:

  • c: Controlling value to test
  • block: Destination basic block
  • args: block arguments

pub fn brnz(self, c: Value, block: Block, args: &[Value]) -> Inst[src]

Branch when non-zero.

If c is a b1 value, take the branch when c is true. If c is an integer value, take the branch when c != 0.

Inputs:

  • c: Controlling value to test
  • block: Destination basic block
  • args: block arguments
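
For example, brz (or brnz) and jump combine into the usual two-way branch shape. The following is a minimal sketch, assuming a cranelift_frontend::FunctionBuilder named builder; its create_block helper is not part of this trait.

    use cranelift_codegen::ir::{Block, InstBuilder, Value};
    use cranelift_frontend::FunctionBuilder;

    // Hypothetical helper: branch on `cond`, returning the two new targets.
    fn emit_two_way(builder: &mut FunctionBuilder<'_>, cond: Value) -> (Block, Block) {
        let then_block = builder.create_block();
        let else_block = builder.create_block();
        // Taken when cond == 0 ...
        builder.ins().brz(cond, else_block, &[]);
        // ... otherwise control falls through to this unconditional jump.
        builder.ins().jump(then_block, &[]);
        // The caller would now switch to each target block and fill it in.
        (then_block, else_block)
    }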

pub fn br_icmp<T1>(
    self,
    Cond: T1,
    x: Value,
    y: Value,
    block: Block,
    args: &[Value]
) -> Inst where
    T1: Into<IntCC>, 
[src]

Compare scalar integers and branch.

Compare x and y in the same way as the icmp instruction and take the branch if the condition is true:

    br_icmp ugt v1, v2, block4(v5, v6)

is semantically equivalent to:

    v10 = icmp ugt v1, v2
    brnz v10, block4(v5, v6)

Some RISC architectures like MIPS and RISC-V provide instructions that implement all or some of the condition codes. The instruction can also be used to represent macro-op fusion on architectures like Intel’s.

Inputs:

  • Cond: An integer comparison condition code.
  • x: A scalar integer type
  • y: A scalar integer type
  • block: Destination basic block
  • args: block arguments
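
At the builder level, the CLIF snippet above corresponds roughly to the call below; this is a sketch assuming a cranelift_frontend::FunctionBuilder and values and blocks that already exist.

    use cranelift_codegen::ir::{condcodes::IntCC, Block, InstBuilder, Value};
    use cranelift_frontend::FunctionBuilder;

    fn emit_br_icmp(builder: &mut FunctionBuilder<'_>, v1: Value, v2: Value,
                    block4: Block, v5: Value, v6: Value) {
        // Equivalent of: br_icmp ugt v1, v2, block4(v5, v6)
        builder.ins().br_icmp(IntCC::UnsignedGreaterThan, v1, v2, block4, &[v5, v6]);
    }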

pub fn brif<T1>(self, Cond: T1, f: Value, block: Block, args: &[Value]) -> Inst where
    T1: Into<IntCC>, 
[src]

Branch when condition is true in integer CPU flags.

Inputs:

  • Cond: An integer comparison condition code.
  • f: CPU flags representing the result of an integer comparison. These flags can be tested with an intcc condition code.
  • block: Destination basic block
  • args: block arguments

pub fn brff<T1>(self, Cond: T1, f: Value, block: Block, args: &[Value]) -> Inst where
    T1: Into<FloatCC>, 
[src]

Branch when condition is true in floating point CPU flags.

Inputs:

  • Cond: A floating point comparison condition code
  • f: CPU flags representing the result of a floating point comparison. These flags can be tested with a floatcc condition code.
  • block: Destination basic block
  • args: block arguments

pub fn br_table(self, x: Value, block: Block, JT: JumpTable) -> Inst[src]

Indirect branch via jump table.

Use x as an unsigned index into the jump table JT. If a jump table entry is found, branch to the corresponding block. If no entry was found or the index is out-of-bounds, branch to the given default block.

Note that this branch instruction can’t pass arguments to the targeted blocks. Split critical edges as needed to work around this.

Do not confuse this with “tables” in WebAssembly. br_table is for jump tables with destinations within the current function only – think of a match in Rust or a switch in C. If you want to call a function in a dynamic library, that will typically use call_indirect.

Inputs:

  • x: index into jump table
  • block: Destination basic block
  • JT: A jump table.
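
A minimal sketch of driving br_table, assuming a cranelift_frontend::FunctionBuilder; its create_jump_table method and the JumpTableData type are not part of this trait.

    use cranelift_codegen::ir::{Block, InstBuilder, JumpTableData, Value};
    use cranelift_frontend::FunctionBuilder;

    // Hypothetical three-way switch over `idx`; out-of-range indices go to
    // `default_block`.
    fn emit_switch(builder: &mut FunctionBuilder<'_>, idx: Value,
                   default_block: Block, cases: [Block; 3]) {
        let mut data = JumpTableData::new();
        for &case in &cases {
            data.push_entry(case);
        }
        let jt = builder.create_jump_table(data);
        builder.ins().br_table(idx, default_block, jt);
    }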

pub fn jump_table_entry<T1>(
    self,
    x: Value,
    addr: Value,
    Size: T1,
    JT: JumpTable
) -> Value where
    T1: Into<u8>, 
[src]

Get an entry from a jump table.

Load a serialized entry from a jump table JT at a given index addr with a specific Size. The retrieved entry may need to be decoded after loading, depending upon the jump table type used.

Currently, the only type supported is entries which are relative to the base of the jump table.

Inputs:

  • x: index into jump table
  • addr: An integer address type
  • Size: Size in bytes
  • JT: A jump table.

Outputs:

  • entry: entry of jump table

pub fn jump_table_base(self, iAddr: Type, JT: JumpTable) -> Value[src]

Get the absolute base address of a jump table.

This is used for jump tables wherein the entries are stored relative to the base of jump table. In order to use these, generated code should first load an entry using jump_table_entry, then use this instruction to add the relative base back to it.

Inputs:

  • iAddr (controlling type variable): An integer address type
  • JT: A jump table.

Outputs:

  • addr: An integer address type

pub fn indirect_jump_table_br(self, addr: Value, JT: JumpTable) -> Inst[src]

Branch indirectly via a jump table entry.

Unconditionally jump via a jump table entry that was previously loaded with the jump_table_entry instruction.

Inputs:

  • addr: An integer address type
  • JT: A jump table.
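
Taken together, jump_table_base, jump_table_entry and indirect_jump_table_br spell out the expanded form of br_table. A hedged sketch of that sequence, assuming a cranelift_frontend::FunctionBuilder, a 64-bit address type and 4-byte table entries:

    use cranelift_codegen::ir::{types, InstBuilder, JumpTable, Value};
    use cranelift_frontend::FunctionBuilder;

    fn emit_expanded_br_table(builder: &mut FunctionBuilder<'_>, idx: Value, jt: JumpTable) {
        // Absolute base of the table, then the base-relative entry at `idx`.
        let base = builder.ins().jump_table_base(types::I64, jt);
        let entry = builder.ins().jump_table_entry(idx, base, 4u8, jt);
        // Add the relative entry back onto the base and jump through it.
        let target = builder.ins().iadd(base, entry);
        builder.ins().indirect_jump_table_br(target, jt);
    }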

pub fn debugtrap(self) -> Inst[src]

Encodes an assembly debug trap.

pub fn trap<T1>(self, code: T1) -> Inst where
    T1: Into<TrapCode>, 
[src]

Terminate execution unconditionally.

Inputs:

  • code: A trap reason code.

pub fn trapz<T1>(self, c: Value, code: T1) -> Inst where
    T1: Into<TrapCode>, 
[src]

Trap when zero.

If c is non-zero, execution continues at the following instruction.

Inputs:

  • c: Controlling value to test
  • code: A trap reason code.
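
For example, a divide-by-zero guard can be written with trapz. A sketch, assuming a cranelift_frontend::FunctionBuilder and the TrapCode enum from the same ir module:

    use cranelift_codegen::ir::{InstBuilder, TrapCode, Value};
    use cranelift_frontend::FunctionBuilder;

    // Hypothetical guard: trap when `divisor` is zero, otherwise divide.
    fn checked_udiv(builder: &mut FunctionBuilder<'_>, num: Value, divisor: Value) -> Value {
        builder.ins().trapz(divisor, TrapCode::IntegerDivisionByZero);
        builder.ins().udiv(num, divisor)
    }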

pub fn resumable_trap<T1>(self, code: T1) -> Inst where
    T1: Into<TrapCode>, 
[src]

A resumable trap.

This instruction allows non-conditional traps to be used as non-terminal instructions.

Inputs:

  • code: A trap reason code.

pub fn trapnz<T1>(self, c: Value, code: T1) -> Inst where
    T1: Into<TrapCode>, 
[src]

Trap when non-zero.

If c is zero, execution continues at the following instruction.

Inputs:

  • c: Controlling value to test
  • code: A trap reason code.

pub fn resumable_trapnz<T1>(self, c: Value, code: T1) -> Inst where
    T1: Into<TrapCode>, 
[src]

A resumable trap, taken when the passed condition is non-zero.

If c is zero, execution continues at the following instruction.

Inputs:

  • c: Controlling value to test
  • code: A trap reason code.

pub fn trapif<T1, T2>(self, Cond: T1, f: Value, code: T2) -> Inst where
    T1: Into<IntCC>,
    T2: Into<TrapCode>, 
[src]

Trap when condition is true in integer CPU flags.

Inputs:

  • Cond: An integer comparison condition code.
  • f: CPU flags representing the result of an integer comparison. These flags can be tested with an intcc condition code.
  • code: A trap reason code.

pub fn trapff<T1, T2>(self, Cond: T1, f: Value, code: T2) -> Inst where
    T1: Into<FloatCC>,
    T2: Into<TrapCode>, 
[src]

Trap when condition is true in floating point CPU flags.

Inputs:

  • Cond: A floating point comparison condition code
  • f: CPU flags representing the result of a floating point comparison. These flags can be tested with a floatcc condition code.
  • code: A trap reason code.

pub fn return_(self, rvals: &[Value]) -> Inst[src]

Return from the function.

Unconditionally transfer control to the calling function, passing the provided return values. The list of return values must match the function signature’s return types.

Inputs:

  • rvals: return values

pub fn fallthrough_return(self, rvals: &[Value]) -> Inst[src]

Return from the function by fallthrough.

This is a specialized instruction for use where one wants to append a custom epilogue, which will then perform the real return. This instruction has no encoding.

Inputs:

  • rvals: return values

pub fn call(self, FN: FuncRef, args: &[Value]) -> Inst[src]

Direct function call.

Call a function which has been declared in the preamble. The argument types must match the function’s signature.

Inputs:

  • FN: function to call, declared by function
  • args: call arguments

Outputs:

  • rvals: return values

pub fn call_indirect(self, SIG: SigRef, callee: Value, args: &[Value]) -> Inst[src]

Indirect function call.

Call the function pointed to by callee with the given arguments. The called function must match the specified signature.

Note that this is different from WebAssembly’s call_indirect; the callee is a native address, rather than a table index. For WebAssembly, table_addr and load are used to obtain a native address from a table.

Inputs:

  • SIG: function signature
  • callee: address of function to call
  • args: call arguments

Outputs:

  • rvals: return values

pub fn func_addr(self, iAddr: Type, FN: FuncRef) -> Value[src]

Get the address of a function.

Compute the absolute address of a function declared in the preamble. The returned address can be used as a callee argument to call_indirect. This is also a method for calling functions that are too far away to be addressable by a direct call instruction.

Inputs:

  • iAddr (controlling type variable): An integer address type
  • FN: function to call, declared by function

Outputs:

  • addr: An integer address type
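
Putting func_addr and call_indirect together: a sketch assuming a cranelift_frontend::FunctionBuilder, a FuncRef and SigRef already declared in the preamble, and a signature with exactly one return value.

    use cranelift_codegen::ir::{types, FuncRef, InstBuilder, SigRef, Value};
    use cranelift_frontend::FunctionBuilder;

    fn emit_indirect_call(builder: &mut FunctionBuilder<'_>, func_ref: FuncRef,
                          sig: SigRef, arg: Value) -> Value {
        // Materialize the callee's address, then call through it.
        let callee = builder.ins().func_addr(types::I64, func_ref);
        let call = builder.ins().call_indirect(sig, callee, &[arg]);
        // call_indirect returns the Inst; its result values live in the DFG.
        builder.inst_results(call)[0]
    }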

pub fn splat(self, TxN: Type, x: Value) -> Value[src]

Vector splat.

Return a vector whose lanes are all x.

Inputs:

  • TxN (controlling type variable): A SIMD vector type
  • x: Value to splat to all lanes

Outputs:

  • a: A SIMD vector type

pub fn swizzle(self, TxN: Type, x: Value, y: Value) -> Value[src]

Vector swizzle.

Returns a new vector with byte-width lanes selected from the lanes of the first input vector x as specified in the second input vector y. The indices i in the range [0, 15] select the i-th element of x. For indices outside of that range the resulting lane is 0. Note that this operates on byte-width lanes.

Inputs:

  • TxN (controlling type variable): A SIMD vector type
  • x: Vector to modify by re-arranging lanes
  • y: Mask for re-arranging lanes

Outputs:

  • a: A SIMD vector type

pub fn insertlane<T1>(self, x: Value, y: Value, Idx: T1) -> Value where
    T1: Into<u8>, 
[src]

Insert y as lane Idx in x.

The lane index, Idx, is an immediate value, not an SSA value. It must indicate a valid lane index for the type of x.

Inputs:

  • x: The vector to modify
  • y: New lane value
  • Idx: Lane index

Outputs:

  • a: A SIMD vector type

pub fn extractlane<T1>(self, x: Value, Idx: T1) -> Value where
    T1: Into<u8>, 
[src]

Extract lane Idx from x.

The lane index, Idx, is an immediate value, not an SSA value. It must indicate a valid lane index for the type of x. Note that the upper bits of a may or may not be zeroed depending on the ISA but the type system should prevent using a as anything other than the extracted value.

Inputs:

  • x: A SIMD vector type
  • Idx: Lane index

Outputs:

  • a: The lane value extracted from x
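
A small sketch tying splat, insertlane and extractlane together, assuming a cranelift_frontend::FunctionBuilder; lane indices are plain u8 immediates.

    use cranelift_codegen::ir::{types, InstBuilder, Value};
    use cranelift_frontend::FunctionBuilder;

    fn lane_demo(builder: &mut FunctionBuilder<'_>) -> Value {
        let zero = builder.ins().iconst(types::I32, 0);
        let v = builder.ins().splat(types::I32X4, zero);   // [0, 0, 0, 0]
        let seven = builder.ins().iconst(types::I32, 7);
        let v2 = builder.ins().insertlane(v, seven, 2u8);  // [0, 0, 7, 0]
        builder.ins().extractlane(v2, 2u8)                 // 7, as an i32
    }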

pub fn imin(self, x: Value, y: Value) -> Value[src]

Signed integer minimum.

Inputs:

  • x: A scalar or vector integer type
  • y: A scalar or vector integer type

Outputs:

  • a: A scalar or vector integer type

pub fn umin(self, x: Value, y: Value) -> Value[src]

Unsigned integer minimum.

Inputs:

  • x: A scalar or vector integer type
  • y: A scalar or vector integer type

Outputs:

  • a: A scalar or vector integer type

pub fn imax(self, x: Value, y: Value) -> Value[src]

Signed integer maximum.

Inputs:

  • x: A scalar or vector integer type
  • y: A scalar or vector integer type

Outputs:

  • a: A scalar or vector integer type

pub fn umax(self, x: Value, y: Value) -> Value[src]

Unsigned integer maximum.

Inputs:

  • x: A scalar or vector integer type
  • y: A scalar or vector integer type

Outputs:

  • a: A scalar or vector integer type

pub fn avg_round(self, x: Value, y: Value) -> Value[src]

Unsigned average with rounding: a := (x + y + 1) // 2

Inputs:

  • x: A SIMD vector type containing integers
  • y: A SIMD vector type containing integers

Outputs:

  • a: A SIMD vector type containing integers

pub fn load<T1, T2>(
    self,
    Mem: Type,
    MemFlags: T1,
    p: Value,
    Offset: T2
) -> Value where
    T1: Into<MemFlags>,
    T2: Into<Offset32>, 
[src]

Load from memory at p + Offset.

This is a polymorphic instruction that can load any value type which has a memory representation.

Inputs:

  • Mem (controlling type variable): Any type that can be stored in memory
  • MemFlags: Memory operation flags
  • p: An integer address type
  • Offset: Byte offset from base address

Outputs:

  • a: Value loaded

pub fn load_complex<T1, T2>(
    self,
    Mem: Type,
    MemFlags: T1,
    args: &[Value],
    Offset: T2
) -> Value where
    T1: Into<MemFlags>,
    T2: Into<Offset32>, 
[src]

Load from memory at sum(args) + Offset.

This is a polymorphic instruction that can load any value type which has a memory representation.

Inputs:

  • Mem (controlling type variable): Any type that can be stored in memory
  • MemFlags: Memory operation flags
  • args: Address arguments
  • Offset: Byte offset from base address

Outputs:

  • a: Value loaded

pub fn store<T1, T2>(self, MemFlags: T1, x: Value, p: Value, Offset: T2) -> Inst where
    T1: Into<MemFlags>,
    T2: Into<Offset32>, 
[src]

Store x to memory at p + Offset.

This is a polymorphic instruction that can store any value type with a memory representation.

Inputs:

  • MemFlags: Memory operation flags
  • x: Value to be stored
  • p: An integer address type
  • Offset: Byte offset from base address
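
A sketch of a plain load/modify/store round trip at p + 8, assuming a cranelift_frontend::FunctionBuilder; MemFlags::new() gives the default flags (the access may trap and no alignment is assumed).

    use cranelift_codegen::ir::{types, InstBuilder, MemFlags, Value};
    use cranelift_frontend::FunctionBuilder;

    fn bump_counter(builder: &mut FunctionBuilder<'_>, ptr: Value) {
        let flags = MemFlags::new();
        // Read the i32 at ptr + 8, add one, write it back.
        let old = builder.ins().load(types::I32, flags, ptr, 8);
        let new = builder.ins().iadd_imm(old, 1);
        builder.ins().store(flags, new, ptr, 8);
    }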

pub fn store_complex<T1, T2>(
    self,
    MemFlags: T1,
    x: Value,
    args: &[Value],
    Offset: T2
) -> Inst where
    T1: Into<MemFlags>,
    T2: Into<Offset32>, 
[src]

Store x to memory at sum(args) + Offset.

This is a polymorphic instruction that can store any value type with a memory representation.

Inputs:

  • MemFlags: Memory operation flags
  • x: Value to be stored
  • args: Address arguments
  • Offset: Byte offset from base address

pub fn uload8<T1, T2>(
    self,
    iExt8: Type,
    MemFlags: T1,
    p: Value,
    Offset: T2
) -> Value where
    T1: Into<MemFlags>,
    T2: Into<Offset32>, 
[src]

Load 8 bits from memory at p + Offset and zero-extend.

This is equivalent to load.i8 followed by uextend.

Inputs:

  • iExt8 (controlling type variable): An integer type with more than 8 bits
  • MemFlags: Memory operation flags
  • p: An integer address type
  • Offset: Byte offset from base address

Outputs:

  • a: An integer type with more than 8 bits
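
The equivalence mentioned above can be spelled out as follows; a sketch, assuming a cranelift_frontend::FunctionBuilder.

    use cranelift_codegen::ir::{types, InstBuilder, MemFlags, Value};
    use cranelift_frontend::FunctionBuilder;

    fn uload8_two_ways(builder: &mut FunctionBuilder<'_>, ptr: Value) -> (Value, Value) {
        let flags = MemFlags::new();
        // One fused instruction ...
        let a = builder.ins().uload8(types::I32, flags, ptr, 0);
        // ... versus the explicit load.i8 + uextend pair it stands for.
        let narrow = builder.ins().load(types::I8, flags, ptr, 0);
        let b = builder.ins().uextend(types::I32, narrow);
        (a, b)
    }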

pub fn uload8_complex<T1, T2>(
    self,
    iExt8: Type,
    MemFlags: T1,
    args: &[Value],
    Offset: T2
) -> Value where
    T1: Into<MemFlags>,
    T2: Into<Offset32>, 
[src]

Load 8 bits from memory at sum(args) + Offset and zero-extend.

This is equivalent to load.i8 followed by uextend.

Inputs:

  • iExt8 (controlling type variable): An integer type with more than 8 bits
  • MemFlags: Memory operation flags
  • args: Address arguments
  • Offset: Byte offset from base address

Outputs:

  • a: An integer type with more than 8 bits

pub fn sload8<T1, T2>(
    self,
    iExt8: Type,
    MemFlags: T1,
    p: Value,
    Offset: T2
) -> Value where
    T1: Into<MemFlags>,
    T2: Into<Offset32>, 
[src]

Load 8 bits from memory at p + Offset and sign-extend.

This is equivalent to load.i8 followed by sextend.

Inputs:

  • iExt8 (controlling type variable): An integer type with more than 8 bits
  • MemFlags: Memory operation flags
  • p: An integer address type
  • Offset: Byte offset from base address

Outputs:

  • a: An integer type with more than 8 bits

pub fn sload8_complex<T1, T2>(
    self,
    iExt8: Type,
    MemFlags: T1,
    args: &[Value],
    Offset: T2
) -> Value where
    T1: Into<MemFlags>,
    T2: Into<Offset32>, 
[src]

Load 8 bits from memory at sum(args) + Offset and sign-extend.

This is equivalent to load.i8 followed by sextend.

Inputs:

  • iExt8 (controlling type variable): An integer type with more than 8 bits
  • MemFlags: Memory operation flags
  • args: Address arguments
  • Offset: Byte offset from base address

Outputs:

  • a: An integer type with more than 8 bits

pub fn istore8<T1, T2>(
    self,
    MemFlags: T1,
    x: Value,
    p: Value,
    Offset: T2
) -> Inst where
    T1: Into<MemFlags>,
    T2: Into<Offset32>, 
[src]

Store the low 8 bits of x to memory at p + Offset.

This is equivalent to ireduce.i8 followed by store.i8.

Inputs:

  • MemFlags: Memory operation flags
  • x: An integer type with more than 8 bits
  • p: An integer address type
  • Offset: Byte offset from base address

pub fn istore8_complex<T1, T2>(
    self,
    MemFlags: T1,
    x: Value,
    args: &[Value],
    Offset: T2
) -> Inst where
    T1: Into<MemFlags>,
    T2: Into<Offset32>, 
[src]

Store the low 8 bits of x to memory at sum(args) + Offset.

This is equivalent to ireduce.i8 followed by store.i8.

Inputs:

  • MemFlags: Memory operation flags
  • x: An integer type with more than 8 bits
  • args: Address arguments
  • Offset: Byte offset from base address

pub fn uload16<T1, T2>(
    self,
    iExt16: Type,
    MemFlags: T1,
    p: Value,
    Offset: T2
) -> Value where
    T1: Into<MemFlags>,
    T2: Into<Offset32>, 
[src]

Load 16 bits from memory at p + Offset and zero-extend.

This is equivalent to load.i16 followed by uextend.

Inputs:

  • iExt16 (controlling type variable): An integer type with more than 16 bits
  • MemFlags: Memory operation flags
  • p: An integer address type
  • Offset: Byte offset from base address

Outputs:

  • a: An integer type with more than 16 bits

pub fn uload16_complex<T1, T2>(
    self,
    iExt16: Type,
    MemFlags: T1,
    args: &[Value],
    Offset: T2
) -> Value where
    T1: Into<MemFlags>,
    T2: Into<Offset32>, 
[src]

Load 16 bits from memory at sum(args) + Offset and zero-extend.

This is equivalent to load.i16 followed by uextend.

Inputs:

  • iExt16 (controlling type variable): An integer type with more than 16 bits
  • MemFlags: Memory operation flags
  • args: Address arguments
  • Offset: Byte offset from base address

Outputs:

  • a: An integer type with more than 16 bits

pub fn sload16<T1, T2>(
    self,
    iExt16: Type,
    MemFlags: T1,
    p: Value,
    Offset: T2
) -> Value where
    T1: Into<MemFlags>,
    T2: Into<Offset32>, 
[src]

Load 16 bits from memory at p + Offset and sign-extend.

This is equivalent to load.i16 followed by sextend.

Inputs:

  • iExt16 (controlling type variable): An integer type with more than 16 bits
  • MemFlags: Memory operation flags
  • p: An integer address type
  • Offset: Byte offset from base address

Outputs:

  • a: An integer type with more than 16 bits

pub fn sload16_complex<T1, T2>(
    self,
    iExt16: Type,
    MemFlags: T1,
    args: &[Value],
    Offset: T2
) -> Value where
    T1: Into<MemFlags>,
    T2: Into<Offset32>, 
[src]

Load 16 bits from memory at sum(args) + Offset and sign-extend.

This is equivalent to load.i16 followed by sextend.

Inputs:

  • iExt16 (controlling type variable): An integer type with more than 16 bits
  • MemFlags: Memory operation flags
  • args: Address arguments
  • Offset: Byte offset from base address

Outputs:

  • a: An integer type with more than 16 bits

pub fn istore16<T1, T2>(
    self,
    MemFlags: T1,
    x: Value,
    p: Value,
    Offset: T2
) -> Inst where
    T1: Into<MemFlags>,
    T2: Into<Offset32>, 
[src]

Store the low 16 bits of x to memory at p + Offset.

This is equivalent to ireduce.i16 followed by store.i16.

Inputs:

  • MemFlags: Memory operation flags
  • x: An integer type with more than 16 bits
  • p: An integer address type
  • Offset: Byte offset from base address

pub fn istore16_complex<T1, T2>(
    self,
    MemFlags: T1,
    x: Value,
    args: &[Value],
    Offset: T2
) -> Inst where
    T1: Into<MemFlags>,
    T2: Into<Offset32>, 
[src]

Store the low 16 bits of x to memory at sum(args) + Offset.

This is equivalent to ireduce.i16 followed by store.i16.

Inputs:

  • MemFlags: Memory operation flags
  • x: An integer type with more than 16 bits
  • args: Address arguments
  • Offset: Byte offset from base address

pub fn uload32<T1, T2>(self, MemFlags: T1, p: Value, Offset: T2) -> Value where
    T1: Into<MemFlags>,
    T2: Into<Offset32>, 
[src]

Load 32 bits from memory at p + Offset and zero-extend.

This is equivalent to load.i32 followed by uextend.

Inputs:

  • MemFlags: Memory operation flags
  • p: An integer address type
  • Offset: Byte offset from base address

Outputs:

  • a: An integer type with more than 32 bits

pub fn uload32_complex<T1, T2>(
    self,
    MemFlags: T1,
    args: &[Value],
    Offset: T2
) -> Value where
    T1: Into<MemFlags>,
    T2: Into<Offset32>, 
[src]

Load 32 bits from memory at sum(args) + Offset and zero-extend.

This is equivalent to load.i32 followed by uextend.

Inputs:

  • MemFlags: Memory operation flags
  • args: Address arguments
  • Offset: Byte offset from base address

Outputs:

  • a: An integer type with more than 32 bits

pub fn sload32<T1, T2>(self, MemFlags: T1, p: Value, Offset: T2) -> Value where
    T1: Into<MemFlags>,
    T2: Into<Offset32>, 
[src]

Load 32 bits from memory at p + Offset and sign-extend.

This is equivalent to load.i32 followed by sextend.

Inputs:

  • MemFlags: Memory operation flags
  • p: An integer address type
  • Offset: Byte offset from base address

Outputs:

  • a: An integer type with more than 32 bits

pub fn sload32_complex<T1, T2>(
    self,
    MemFlags: T1,
    args: &[Value],
    Offset: T2
) -> Value where
    T1: Into<MemFlags>,
    T2: Into<Offset32>, 
[src]

Load 32 bits from memory at sum(args) + Offset and sign-extend.

This is equivalent to load.i32 followed by sextend.

Inputs:

  • MemFlags: Memory operation flags
  • args: Address arguments
  • Offset: Byte offset from base address

Outputs:

  • a: An integer type with more than 32 bits

pub fn istore32<T1, T2>(
    self,
    MemFlags: T1,
    x: Value,
    p: Value,
    Offset: T2
) -> Inst where
    T1: Into<MemFlags>,
    T2: Into<Offset32>, 
[src]

Store the low 32 bits of x to memory at p + Offset.

This is equivalent to ireduce.i32 followed by store.i32.

Inputs:

  • MemFlags: Memory operation flags
  • x: An integer type with more than 32 bits
  • p: An integer address type
  • Offset: Byte offset from base address

pub fn istore32_complex<T1, T2>(
    self,
    MemFlags: T1,
    x: Value,
    args: &[Value],
    Offset: T2
) -> Inst where
    T1: Into<MemFlags>,
    T2: Into<Offset32>, 
[src]

Store the low 32 bits of x to memory at sum(args) + Offset.

This is equivalent to ireduce.i32 followed by store.i32.

Inputs:

  • MemFlags: Memory operation flags
  • x: An integer type with more than 32 bits
  • args: Address arguments
  • Offset: Byte offset from base address
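
The extending loads and truncating stores above pair naturally. A minimal sketch, assuming a cranelift-frontend FunctionBuilder named builder as one common way to obtain an InstBuilder; the helper name and the values x (an i64) and p (an address) are hypothetical.

    use cranelift_codegen::ir::{MemFlags, Value};
    use cranelift_frontend::FunctionBuilder;

    // Store the low 32 bits of an i64 at address `p`, then reload them
    // with sign-extension back to i64.
    fn store_low32_and_reload(builder: &mut FunctionBuilder, x: Value, p: Value) -> Value {
        let flags = MemFlags::new();
        builder.ins().istore32(flags, x, p, 0);
        builder.ins().sload32(flags, p, 0)
    }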

pub fn uload8x8<T1, T2>(self, MemFlags: T1, p: Value, Offset: T2) -> Value where
    T1: Into<MemFlags>,
    T2: Into<Offset32>, 
[src]

Load an 8x8 vector (64 bits) from memory at p + Offset and zero-extend into an i16x8 vector.

Inputs:

  • MemFlags: Memory operation flags
  • p: An integer address type
  • Offset: Byte offset from base address

Outputs:

  • a: Value loaded

pub fn uload8x8_complex<T1, T2>(
    self,
    MemFlags: T1,
    args: &[Value],
    Offset: T2
) -> Value where
    T1: Into<MemFlags>,
    T2: Into<Offset32>, 
[src]

Load an 8x8 vector (64 bits) from memory at sum(args) + Offset and zero-extend into an i16x8 vector.

Inputs:

  • MemFlags: Memory operation flags
  • args: Address arguments
  • Offset: Byte offset from base address

Outputs:

  • a: Value loaded

pub fn sload8x8<T1, T2>(self, MemFlags: T1, p: Value, Offset: T2) -> Value where
    T1: Into<MemFlags>,
    T2: Into<Offset32>, 
[src]

Load an 8x8 vector (64 bits) from memory at p + Offset and sign-extend into an i16x8 vector.

Inputs:

  • MemFlags: Memory operation flags
  • p: An integer address type
  • Offset: Byte offset from base address

Outputs:

  • a: Value loaded

pub fn sload8x8_complex<T1, T2>(
    self,
    MemFlags: T1,
    args: &[Value],
    Offset: T2
) -> Value where
    T1: Into<MemFlags>,
    T2: Into<Offset32>, 
[src]

Load an 8x8 vector (64 bits) from memory at sum(args) + Offset and sign-extend into an i16x8 vector.

Inputs:

  • MemFlags: Memory operation flags
  • args: Address arguments
  • Offset: Byte offset from base address

Outputs:

  • a: Value loaded

pub fn uload16x4<T1, T2>(self, MemFlags: T1, p: Value, Offset: T2) -> Value where
    T1: Into<MemFlags>,
    T2: Into<Offset32>, 
[src]

Load a 16x4 vector (64 bits) from memory at p + Offset and zero-extend into an i32x4 vector.

Inputs:

  • MemFlags: Memory operation flags
  • p: An integer address type
  • Offset: Byte offset from base address

Outputs:

  • a: Value loaded

pub fn uload16x4_complex<T1, T2>(
    self,
    MemFlags: T1,
    args: &[Value],
    Offset: T2
) -> Value where
    T1: Into<MemFlags>,
    T2: Into<Offset32>, 
[src]

Load a 16x4 vector (64 bits) from memory at sum(args) + Offset and zero-extend into an i32x4 vector.

Inputs:

  • MemFlags: Memory operation flags
  • args: Address arguments
  • Offset: Byte offset from base address

Outputs:

  • a: Value loaded

pub fn sload16x4<T1, T2>(self, MemFlags: T1, p: Value, Offset: T2) -> Value where
    T1: Into<MemFlags>,
    T2: Into<Offset32>, 
[src]

Load a 16x4 vector (64 bits) from memory at p + Offset and sign-extend into an i32x4 vector.

Inputs:

  • MemFlags: Memory operation flags
  • p: An integer address type
  • Offset: Byte offset from base address

Outputs:

  • a: Value loaded

pub fn sload16x4_complex<T1, T2>(
    self,
    MemFlags: T1,
    args: &[Value],
    Offset: T2
) -> Value where
    T1: Into<MemFlags>,
    T2: Into<Offset32>, 
[src]

Load a 16x4 vector (64 bits) from memory at sum(args) + Offset and sign-extend into an i32x4 vector.

Inputs:

  • MemFlags: Memory operation flags
  • args: Address arguments
  • Offset: Byte offset from base address

Outputs:

  • a: Value loaded

pub fn uload32x2<T1, T2>(self, MemFlags: T1, p: Value, Offset: T2) -> Value where
    T1: Into<MemFlags>,
    T2: Into<Offset32>, 
[src]

Load a 32x2 vector (64 bits) from memory at p + Offset and zero-extend into an i64x2 vector.

Inputs:

  • MemFlags: Memory operation flags
  • p: An integer address type
  • Offset: Byte offset from base address

Outputs:

  • a: Value loaded

pub fn uload32x2_complex<T1, T2>(
    self,
    MemFlags: T1,
    args: &[Value],
    Offset: T2
) -> Value where
    T1: Into<MemFlags>,
    T2: Into<Offset32>, 
[src]

Load a 32x2 vector (64 bits) from memory at sum(args) + Offset and zero-extend into an i64x2 vector.

Inputs:

  • MemFlags: Memory operation flags
  • args: Address arguments
  • Offset: Byte offset from base address

Outputs:

  • a: Value loaded

pub fn sload32x2<T1, T2>(self, MemFlags: T1, p: Value, Offset: T2) -> Value where
    T1: Into<MemFlags>,
    T2: Into<Offset32>, 
[src]

Load a 32x2 vector (64 bits) from memory at p + Offset and sign-extend into an i64x2 vector.

Inputs:

  • MemFlags: Memory operation flags
  • p: An integer address type
  • Offset: Byte offset from base address

Outputs:

  • a: Value loaded

pub fn sload32x2_complex<T1, T2>(
    self,
    MemFlags: T1,
    args: &[Value],
    Offset: T2
) -> Value where
    T1: Into<MemFlags>,
    T2: Into<Offset32>, 
[src]

Load a 32x2 vector (64 bits) from memory at sum(args) + Offset and sign-extend into an i64x2 vector.

Inputs:

  • MemFlags: Memory operation flags
  • args: Address arguments
  • Offset: Byte offset from base address

Outputs:

  • a: Value loaded

pub fn stack_load<T1>(self, Mem: Type, SS: StackSlot, Offset: T1) -> Value where
    T1: Into<Offset32>, 
[src]

Load a value from a stack slot at the constant offset.

This is a polymorphic instruction that can load any value type which has a memory representation.

The offset is an immediate constant, not an SSA value. The memory access cannot go out of bounds, i.e. sizeof(a) + Offset <= sizeof(SS).

Inputs:

  • Mem (controlling type variable): Any type that can be stored in memory
  • SS: A stack slot
  • Offset: In-bounds offset into stack slot

Outputs:

  • a: Value loaded

pub fn stack_store<T1>(self, x: Value, SS: StackSlot, Offset: T1) -> Inst where
    T1: Into<Offset32>, 
[src]

Store a value to a stack slot at a constant offset.

This is a polymorphic instruction that can store any value type with a memory representation.

The offset is an immediate constant, not an SSA value. The memory access cannot go out of bounds, i.e. sizeof(a) + Offset <= sizeof(SS).

Inputs:

  • x: Value to be stored
  • SS: A stack slot
  • Offset: In-bounds offset into stack slot

pub fn stack_addr<T1>(self, iAddr: Type, SS: StackSlot, Offset: T1) -> Value where
    T1: Into<Offset32>, 
[src]

Get the address of a stack slot.

Compute the absolute address of a byte in a stack slot. The offset must refer to a byte inside the stack slot: 0 <= Offset < sizeof(SS).

Inputs:

  • iAddr (controlling type variable): An integer address type
  • SS: A stack slot
  • Offset: In-bounds offset into stack slot

Outputs:

  • addr: An integer address type
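
A minimal sketch of the stack-slot instructions, assuming a cranelift-frontend FunctionBuilder named builder and a StackSlot ss of at least 8 bytes created elsewhere; the helper name is hypothetical.

    use cranelift_codegen::ir::{types, StackSlot, Value};
    use cranelift_frontend::FunctionBuilder;

    // Round-trip an i64 value through a stack slot.
    fn round_trip_through_slot(builder: &mut FunctionBuilder, ss: StackSlot, x: Value) -> Value {
        // In-bounds store at offset 0 of the slot.
        builder.ins().stack_store(x, ss, 0);
        // Reload the same eight bytes as an i64.
        builder.ins().stack_load(types::I64, ss, 0)
    }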

pub fn global_value(self, Mem: Type, GV: GlobalValue) -> Value[src]

Compute the value of global GV.

Inputs:

  • Mem (controlling type variable): Any type that can be stored in memory
  • GV: A global value.

Outputs:

  • a: Value loaded

pub fn symbol_value(self, Mem: Type, GV: GlobalValue) -> Value[src]

Compute the value of global GV, which is a symbolic value.

Inputs:

  • Mem (controlling type variable): Any type that can be stored in memory
  • GV: A global value.

Outputs:

  • a: Value loaded

pub fn tls_value(self, Mem: Type, GV: GlobalValue) -> Value[src]

Compute the value of global GV, which is a TLS (thread local storage) value.

Inputs:

  • Mem (controlling type variable): Any type that can be stored in memory
  • GV: A global value.

Outputs:

  • a: Value loaded

pub fn heap_addr<T1>(self, iAddr: Type, H: Heap, p: Value, Size: T1) -> Value where
    T1: Into<Uimm32>, 
[src]

Bounds check and compute absolute address of heap memory.

Verify that the offset range p .. p + Size - 1 is in bounds for the heap H, and generate an absolute address that is safe to dereference.

  1. If p + Size is not greater than the heap bound, return an absolute address corresponding to a byte offset of p from the heap’s base address.
  2. If p + Size is greater than the heap bound, generate a trap.

Inputs:

  • iAddr (controlling type variable): An integer address type
  • H: A heap.
  • p: An unsigned heap offset
  • Size: Size in bytes

Outputs:

  • addr: An integer address type
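
heap_addr composes with an ordinary load to form a bounds-checked heap read. A sketch, assuming a 64-bit address type, a cranelift-frontend FunctionBuilder named builder, and a Heap h declared elsewhere; the helper name is hypothetical.

    use cranelift_codegen::ir::{types, Heap, MemFlags, Value};
    use cranelift_frontend::FunctionBuilder;

    // Load an i32 from heap `h` at unsigned offset `offset`, trapping when
    // `offset + 4` exceeds the heap bound.
    fn checked_heap_load_i32(builder: &mut FunctionBuilder, h: Heap, offset: Value) -> Value {
        let addr = builder.ins().heap_addr(types::I64, h, offset, 4u32);
        builder.ins().load(types::I32, MemFlags::new(), addr, 0)
    }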

pub fn get_pinned_reg(self, iAddr: Type) -> Value[src]

Gets the content of the pinned register, when it’s enabled.

Inputs:

  • iAddr (controlling type variable): An integer address type

Outputs:

  • addr: An integer address type

pub fn set_pinned_reg(self, addr: Value) -> Inst[src]

Sets the content of the pinned register, when it’s enabled.

Inputs:

  • addr: An integer address type

pub fn table_addr<T1>(
    self,
    iAddr: Type,
    T: Table,
    p: Value,
    Offset: T1
) -> Value where
    T1: Into<Offset32>, 
[src]

Bounds check and compute absolute address of a table entry.

Verify that the offset p is in bounds for the table T, and generate an absolute address that is safe to dereference.

Offset must be less than the size of a table element.

  1. If p is not greater than the table bound, return an absolute address corresponding to a byte offset of p from the table’s base address.
  2. If p is greater than the table bound, generate a trap.

Inputs:

  • iAddr (controlling type variable): An integer address type
  • T: A table.
  • p: An unsigned table offset
  • Offset: Byte offset from element address

Outputs:

  • addr: An integer address type

pub fn iconst<T1>(self, Int: Type, N: T1) -> Value where
    T1: Into<Imm64>, 
[src]

Integer constant.

Create a scalar integer SSA value with an immediate constant value, or an integer vector where all the lanes have the same value.

Inputs:

  • Int (controlling type variable): A scalar or vector integer type
  • N: A 64-bit immediate integer.

Outputs:

  • a: A constant integer scalar or vector value

pub fn f32const<T1>(self, N: T1) -> Value where
    T1: Into<Ieee32>, 
[src]

Floating point constant.

Create a f32 SSA value with an immediate constant value.

Inputs:

  • N: A 32-bit immediate floating point number.

Outputs:

  • a: A constant f32 scalar value

pub fn f64const<T1>(self, N: T1) -> Value where
    T1: Into<Ieee64>, 
[src]

Floating point constant.

Create a f64 SSA value with an immediate constant value.

Inputs:

  • N: A 64-bit immediate floating point number.

Outputs:

  • a: A constant f64 scalar value

pub fn bconst<T1>(self, Bool: Type, N: T1) -> Value where
    T1: Into<bool>, 
[src]

Boolean constant.

Create a scalar boolean SSA value with an immediate constant value, or a boolean vector where all the lanes have the same value.

Inputs:

  • Bool (controlling type variable): A scalar or vector boolean type
  • N: An immediate boolean.

Outputs:

  • a: A constant boolean scalar or vector value
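
A short sketch of the scalar constant builders, assuming a cranelift-frontend FunctionBuilder named builder; the values shown are arbitrary.

    use cranelift_codegen::ir::types;
    use cranelift_frontend::FunctionBuilder;

    fn make_constants(builder: &mut FunctionBuilder) {
        // A 32-bit integer constant; the immediate is always an Imm64.
        let _i = builder.ins().iconst(types::I32, 42i64);
        // A 64-bit floating point constant.
        let _f = builder.ins().f64const(1.5f64);
        // A scalar boolean constant.
        let _b = builder.ins().bconst(types::B1, true);
    }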

pub fn vconst<T1>(self, TxN: Type, N: T1) -> Value where
    T1: Into<Constant>, 
[src]

SIMD vector constant.

Construct a vector with the given immediate bytes.

Inputs:

  • TxN (controlling type variable): A SIMD vector type
  • N: The 16 immediate bytes of a 128-bit vector

Outputs:

  • a: A constant vector value

pub fn const_addr<T1>(self, iAddr: Type, constant: T1) -> Value where
    T1: Into<Constant>, 
[src]

Calculate the base address of a value in the constant pool.

Inputs:

  • iAddr (controlling type variable): An integer address type
  • constant: A constant in the constant pool

Outputs:

  • address: An integer address type

pub fn shuffle<T1>(self, a: Value, b: Value, mask: T1) -> Value where
    T1: Into<Immediate>, 
[src]

SIMD vector shuffle.

Shuffle two vectors using the given immediate bytes. For each of the 16 bytes of the immediate, a value i of 0-15 selects the i-th element of the first vector and a value i of 16-31 selects the (i-16)th element of the second vector. Immediate values outside of the 0-31 range place a 0 in the resulting vector lane.

Inputs:

  • a: A vector value
  • b: A vector value
  • mask: The 16 immediate bytes used for selecting the elements to shuffle

Outputs:

  • a: A vector value

pub fn null(self, Ref: Type) -> Value[src]

Null constant value for reference types.

Create a scalar reference SSA value with a constant null value.

Inputs:

  • Ref (controlling type variable): A scalar reference type

Outputs:

  • a: A constant reference null value

pub fn nop(self) -> Inst[src]

Just a dummy instruction.

Note: this doesn’t compile to a machine code nop.

pub fn select(self, c: Value, x: Value, y: Value) -> Value[src]

Conditional select.

This instruction selects whole values. Use vselect for lane-wise selection.

Inputs:

  • c: Controlling value to test
  • x: Value to use when c is true
  • y: Value to use when c is false

Outputs:

  • a: Any integer, float, boolean, or reference scalar or vector type

pub fn selectif<T1>(
    self,
    Any: Type,
    cc: T1,
    flags: Value,
    x: Value,
    y: Value
) -> Value where
    T1: Into<IntCC>, 
[src]

Conditional select, dependent on integer condition codes.

Inputs:

  • Any (controlling type variable): Any integer, float, boolean, or reference scalar or vector type
  • cc: Controlling condition code
  • flags: The machine’s flag register
  • x: Value to use when c is true
  • y: Value to use when c is false

Outputs:

  • a: Any integer, float, boolean, or reference scalar or vector type

pub fn selectif_spectre_guard<T1>(
    self,
    Any: Type,
    cc: T1,
    flags: Value,
    x: Value,
    y: Value
) -> Value where
    T1: Into<IntCC>, 
[src]

Conditional select intended for Spectre guards.

This operation is semantically equivalent to a selectif instruction. However, it is guaranteed to not be removed or otherwise altered by any optimization pass, and is guaranteed to result in a conditional-move instruction, not a branch-based lowering. As such, it is suitable for use when producing Spectre guards. For example, a bounds-check may guard against unsafe speculation past a bounds-check conditional branch by passing the address or index to be accessed through a conditional move, also gated on the same condition. Because no Spectre-vulnerable processors are known to perform speculation on conditional move instructions, this is guaranteed to pick the correct input. If the selected input in case of overflow is a “safe” value, for example a null pointer that causes an exception in the speculative path, this ensures that no Spectre vulnerability will exist.

Inputs:

  • Any (controlling type variable): Any integer, float, boolean, or reference scalar or vector type
  • cc: Controlling condition code
  • flags: The machine’s flag register
  • x: Value to use when c is true
  • y: Value to use when c is false

Outputs:

  • a: Any integer, float, boolean, or reference scalar or vector type

pub fn bitselect(self, c: Value, x: Value, y: Value) -> Value[src]

Conditional select of bits.

For each bit in c, this instruction selects the corresponding bit from x if the bit in c is 1 and the corresponding bit from y if the bit in c is 0. See also: select, vselect.

Inputs:

  • c: Controlling value to test
  • x: Value to use when c is true
  • y: Value to use when c is false

Outputs:

  • a: Any integer, float, boolean, or reference scalar or vector type

pub fn copy(self, x: Value) -> Value[src]

Register-register copy.

This instruction copies its input, preserving the value type.

A pure SSA-form program does not need to copy values, but this instruction is useful for representing intermediate stages during instruction transformations, and the register allocator needs a way of representing register copies.

Inputs:

  • x: Any integer, float, boolean, or reference scalar or vector type

Outputs:

  • a: Any integer, float, boolean, or reference scalar or vector type

pub fn spill(self, x: Value) -> Value[src]

Spill a register value to a stack slot.

This instruction behaves exactly like copy, but the result value is assigned to a spill slot.

Inputs:

  • x: Any integer, float, boolean, or reference scalar or vector type

Outputs:

  • a: Any integer, float, boolean, or reference scalar or vector type

pub fn fill(self, x: Value) -> Value[src]

Load a register value from a stack slot.

This instruction behaves exactly like copy, but creates a new SSA value for the spilled input value.

Inputs:

  • x: Any integer, float, boolean, or reference scalar or vector type

Outputs:

  • a: Any integer, float, boolean, or reference scalar or vector type

pub fn fill_nop(self, x: Value) -> Value[src]

This is identical to fill, except it has no encoding, since it is a no-op.

This instruction is created only during late-stage redundant-reload removal, after all registers and stack slots have been assigned. It is used to replace fills that have been identified as redundant.

Inputs:

  • x: Any integer, float, boolean, or reference scalar or vector type

Outputs:

  • a: Any integer, float, boolean, or reference scalar or vector type

pub fn dummy_sarg_t(self) -> Value[src]

This creates a sarg_t.

This instruction is internal and should not be created by Cranelift users.

Outputs:

  • sarg_t: Any scalar or vector type with at most 128 lanes

pub fn regmove<T1, T2>(self, x: Value, src: T1, dst: T2) -> Inst where
    T1: Into<u16>,
    T2: Into<u16>, 
[src]

Temporarily divert x from src to dst.

This instruction moves the location of a value from one register to another without creating a new SSA value. It is used by the register allocator to temporarily rearrange register assignments in order to satisfy instruction constraints.

The register diversions created by this instruction must be undone before the value leaves the block. At the entry to a new block, all live values must be in their originally assigned registers.

Inputs:

  • x: Any integer, float, boolean, or reference scalar or vector type
  • src: A register unit in the target ISA
  • dst: A register unit in the target ISA

pub fn copy_special<T1, T2>(self, src: T1, dst: T2) -> Inst where
    T1: Into<u16>,
    T2: Into<u16>, 
[src]

Copies the contents of the src register to the dst register.

This instruction copies the contents of one register to another register without involving any SSA values. This is used for copying special registers, e.g. copying the stack register to the frame register in a function prologue.

Inputs:

  • src: A register unit in the target ISA
  • dst: A register unit in the target ISA

pub fn copy_to_ssa<T1>(self, Any: Type, src: T1) -> Value where
    T1: Into<u16>, 
[src]

Copies the contents of the src register to a new SSA value a.

This instruction copies the contents of one register, regardless of its SSA name, to another register, creating a new SSA name. In that sense it is a one-sided version of copy_special. This instruction is internal and should not be created by Cranelift users.

Inputs:

  • Any (controlling type variable): Any integer, float, boolean, or reference scalar or vector type
  • src: A register unit in the target ISA

Outputs:

  • a: Any integer, float, boolean, or reference scalar or vector type

pub fn copy_nop(self, x: Value) -> Value[src]

Stack-slot-to-the-same-stack-slot copy, which is guaranteed to turn into a no-op. This instruction is for use only within Cranelift itself.

This instruction copies its input, preserving the value type.

Inputs:

  • x: Any integer, float, boolean, or reference scalar or vector type

Outputs:

  • a: Any integer, float, boolean, or reference scalar or vector type

pub fn adjust_sp_down(self, delta: Value) -> Inst[src]

Subtracts delta offset value from the stack pointer register.

This instruction is used to adjust the stack pointer by a dynamic amount.

Inputs:

  • delta: A scalar or vector integer type

pub fn adjust_sp_up_imm<T1>(self, Offset: T1) -> Inst where
    T1: Into<Imm64>, 
[src]

Adds Offset immediate offset value to the stack pointer register.

This instruction is used to adjust the stack pointer, primarily in function prologues and epilogues. Offset is constrained to the size of a signed 32-bit integer.

Inputs:

  • Offset: Offset from current stack pointer

pub fn adjust_sp_down_imm<T1>(self, Offset: T1) -> Inst where
    T1: Into<Imm64>, 
[src]

Subtracts Offset immediate offset value from the stack pointer register.

This instruction is used to adjust the stack pointer, primarily in function prologues and epilogues. Offset is constrained to the size of a signed 32-bit integer.

Inputs:

  • Offset: Offset from current stack pointer

pub fn ifcmp_sp(self, addr: Value) -> Value[src]

Compare addr with the stack pointer and set the CPU flags.

This is like ifcmp where addr is the LHS operand and the stack pointer is the RHS.

Inputs:

  • addr: An integer address type

Outputs:

  • f: CPU flags representing the result of an integer comparison. These flags can be tested with an intcc condition code.

pub fn regspill<T1>(self, x: Value, src: T1, SS: StackSlot) -> Inst where
    T1: Into<u16>, 
[src]

Temporarily divert x from src to SS.

This instruction moves the location of a value from a register to a stack slot without creating a new SSA value. It is used by the register allocator to temporarily rearrange register assignments in order to satisfy instruction constraints.

See also regmove.

Inputs:

  • x: Any integer, float, boolean, or reference scalar or vector type
  • src: A register unit in the target ISA
  • SS: A stack slot

pub fn regfill<T1>(self, x: Value, SS: StackSlot, dst: T1) -> Inst where
    T1: Into<u16>, 
[src]

Temporarily divert x from SS to dst.

This instruction moves the location of a value from a stack slot to a register without creating a new SSA value. It is used by the register allocator to temporarily rearrange register assignments in order to satisfy instruction constraints.

See also regmove.

Inputs:

  • x: Any integer, float, boolean, or reference scalar or vector type
  • SS: A stack slot
  • dst: A register unit in the target ISA

pub fn safepoint(self, args: &[Value]) -> Inst[src]

This instruction will provide live reference values at a point in the function. It can only be used by the compiler.

Inputs:

  • args: Variable number of args for StackMap

pub fn vsplit(self, x: Value) -> (Value, Value)[src]

Split a vector into two halves.

Split the vector x into two separate values, each containing half of the lanes from x. The result may be two scalars if x only had two lanes.

Inputs:

  • x: Vector to split

Outputs:

  • lo: Low-numbered lanes of x
  • hi: High-numbered lanes of x

pub fn vconcat(self, x: Value, y: Value) -> Value[src]

Vector concatenation.

Return a vector formed by concatenating x and y. The resulting vector type has twice as many lanes as each of the inputs. The lanes of x appear as the low-numbered lanes, and the lanes of y become the high-numbered lanes of a.

It is possible to form a vector by concatenating two scalars.

Inputs:

  • x: Low-numbered lanes
  • y: High-numbered lanes

Outputs:

  • a: Concatenation of x and y

pub fn vselect(self, c: Value, x: Value, y: Value) -> Value[src]

Vector lane select.

Select lanes from x or y controlled by the lanes of the boolean vector c.

Inputs:

  • c: Controlling vector
  • x: Value to use where c is true
  • y: Value to use where c is false

Outputs:

  • a: A SIMD vector type

pub fn vany_true(self, a: Value) -> Value[src]

Reduce a vector to a scalar boolean.

Return a scalar boolean true if any lane in a is non-zero, false otherwise.

Inputs:

  • a: A SIMD vector type

Outputs:

  • s: A boolean type with 1 bits.

pub fn vall_true(self, a: Value) -> Value[src]

Reduce a vector to a scalar boolean.

Return a scalar boolean true if all lanes in a are non-zero, false otherwise.

Inputs:

  • a: A SIMD vector type

Outputs:

  • s: A boolean type with 1 bits.

pub fn vhigh_bits(self, Int: Type, a: Value) -> Value[src]

Reduce a vector to a scalar integer.

Return a scalar integer, consisting of the concatenation of the most significant bit of each lane of a.

Inputs:

  • Int (controlling type variable): A scalar or vector integer type
  • a: A SIMD vector type

Outputs:

  • x: A scalar or vector integer type

pub fn icmp<T1>(self, Cond: T1, x: Value, y: Value) -> Value where
    T1: Into<IntCC>, 
[src]

Integer comparison.

The condition code determines if the operands are interpreted as signed or unsigned integers.

    Signed  Unsigned  Condition
    eq      eq        Equal
    ne      ne        Not equal
    slt     ult       Less than
    sge     uge       Greater than or equal
    sgt     ugt       Greater than
    sle     ule       Less than or equal
    of      *         Overflow
    nof     *         No Overflow

* The unsigned versions of the overflow conditions have ISA-specific semantics and thus have been kept as methods on the TargetIsa trait as unsigned_add_overflow_condition and unsigned_sub_overflow_condition.

When this instruction compares integer vectors, it returns a boolean vector of lane-wise comparisons.

Inputs:

  • Cond: An integer comparison condition code.
  • x: A scalar or vector integer type
  • y: A scalar or vector integer type

Outputs:

  • a:
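
Combining icmp with the select instruction documented earlier gives branch-free conditionals. A sketch computing an unsigned minimum, assuming a cranelift-frontend FunctionBuilder named builder; the helper name is hypothetical.

    use cranelift_codegen::ir::{condcodes::IntCC, Value};
    use cranelift_frontend::FunctionBuilder;

    // Unsigned minimum of two scalar integer values of the same type.
    fn unsigned_min(builder: &mut FunctionBuilder, x: Value, y: Value) -> Value {
        // `c` is a b1 value: true when `x < y` as unsigned integers.
        let c = builder.ins().icmp(IntCC::UnsignedLessThan, x, y);
        builder.ins().select(c, x, y)
    }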

pub fn icmp_imm<T1, T2>(self, Cond: T1, x: Value, Y: T2) -> Value where
    T1: Into<IntCC>,
    T2: Into<Imm64>, 
[src]

Compare scalar integer to a constant.

This is the same as the icmp instruction, except one operand is an immediate constant.

This instruction can only compare scalars. Use icmp for lane-wise vector comparisons.

Inputs:

  • Cond: An integer comparison condition code.
  • x: A scalar integer type
  • Y: A 64-bit immediate integer.

Outputs:

  • a: A boolean type with 1 bits.

pub fn ifcmp(self, x: Value, y: Value) -> Value[src]

Compare scalar integers and return flags.

Compare two scalar integer values and return integer CPU flags representing the result.

Inputs:

  • x: A scalar integer type
  • y: A scalar integer type

Outputs:

  • f: CPU flags representing the result of an integer comparison. These flags can be tested with an intcc condition code.

pub fn ifcmp_imm<T1>(self, x: Value, Y: T1) -> Value where
    T1: Into<Imm64>, 
[src]

Compare scalar integer to a constant and return flags.

Like icmp_imm, but returns integer CPU flags instead of testing a specific condition code.

Inputs:

  • x: A scalar integer type
  • Y: A 64-bit immediate integer.

Outputs:

  • f: CPU flags representing the result of an integer comparison. These flags can be tested with an intcc condition code.

pub fn iadd(self, x: Value, y: Value) -> Value[src]

Wrapping integer addition: a := x + y \pmod{2^B}.

This instruction does not depend on the signed/unsigned interpretation of the operands.

Inputs:

  • x: A scalar or vector integer type
  • y: A scalar or vector integer type

Outputs:

  • a: A scalar or vector integer type

pub fn uadd_sat(self, x: Value, y: Value) -> Value[src]

Add with unsigned saturation.

This is similar to iadd but the operands are interpreted as unsigned integers and their summed result, instead of wrapping, will be saturated to the highest unsigned integer for the controlling type (e.g. 0xFF for i8).

Inputs:

  • x: A scalar or vector integer type
  • y: A scalar or vector integer type

Outputs:

  • a: A scalar or vector integer type

pub fn sadd_sat(self, x: Value, y: Value) -> Value[src]

Add with signed saturation.

This is similar to iadd but the operands are interpreted as signed integers and their summed result, instead of wrapping, will be saturated to the lowest or highest signed integer for the controlling type (e.g. 0x80 or 0x7F for i8). For example, since an sadd_sat.i8 of 0x70 and 0x70 is greater than 0x7F, the result will be clamped to 0x7F.

Inputs:

  • x: A scalar or vector integer type
  • y: A scalar or vector integer type

Outputs:

  • a: A scalar or vector integer type

pub fn isub(self, x: Value, y: Value) -> Value[src]

Wrapping integer subtraction: a := x - y \pmod{2^B}.

This instruction does not depend on the signed/unsigned interpretation of the operands.

Inputs:

  • x: A scalar or vector integer type
  • y: A scalar or vector integer type

Outputs:

  • a: A scalar or vector integer type

pub fn usub_sat(self, x: Value, y: Value) -> Value[src]

Subtract with unsigned saturation.

This is similar to isub but the operands are interpreted as unsigned integers and their difference, instead of wrapping, will be saturated to the lowest unsigned integer for the controlling type (e.g. 0x00 for i8).

Inputs:

  • x: A scalar or vector integer type
  • y: A scalar or vector integer type

Outputs:

  • a: A scalar or vector integer type

pub fn ssub_sat(self, x: Value, y: Value) -> Value[src]

Subtract with signed saturation.

This is similar to isub but the operands are interpreted as signed integers and their difference, instead of wrapping, will be saturated to the lowest or highest signed integer for the controlling type (e.g. 0x80 or 0x7F for i8).

Inputs:

  • x: A scalar or vector integer type
  • y: A scalar or vector integer type

Outputs:

  • a: A scalar or vector integer type

pub fn ineg(self, x: Value) -> Value[src]

Integer negation: a := -x \pmod{2^B}.

Inputs:

  • x: A scalar or vector integer type

Outputs:

  • a: A scalar or vector integer type

pub fn iabs(self, x: Value) -> Value[src]

Integer absolute value with wrapping: a := |x|.

Inputs:

  • x: A scalar or vector integer type

Outputs:

  • a: A scalar or vector integer type

pub fn imul(self, x: Value, y: Value) -> Value[src]

Wrapping integer multiplication: a := x y \pmod{2^B}.

This instruction does not depend on the signed/unsigned interpretation of the operands.

Polymorphic over all integer types (vector and scalar).

Inputs:

  • x: A scalar or vector integer type
  • y: A scalar or vector integer type

Outputs:

  • a: A scalar or vector integer type

pub fn umulhi(self, x: Value, y: Value) -> Value[src]

Unsigned integer multiplication, producing the high half of a double-length result.

Polymorphic over all scalar integer types, but does not support vector types.

Inputs:

  • x: A scalar or vector integer type
  • y: A scalar or vector integer type

Outputs:

  • a: A scalar or vector integer type

pub fn smulhi(self, x: Value, y: Value) -> Value[src]

Signed integer multiplication, producing the high half of a double-length result.

Polymorphic over all scalar integer types, but does not support vector types.

Inputs:

  • x: A scalar or vector integer type
  • y: A scalar or vector integer type

Outputs:

  • a: A scalar or vector integer type

pub fn udiv(self, x: Value, y: Value) -> Value[src]

Unsigned integer division: a := \lfloor {x \over y} \rfloor.

This operation traps if the divisor is zero.

Inputs:

  • x: A scalar or vector integer type
  • y: A scalar or vector integer type

Outputs:

  • a: A scalar or vector integer type

pub fn sdiv(self, x: Value, y: Value) -> Value[src]

Signed integer division rounded toward zero: a := sign(xy) \lfloor {|x| \over |y|}\rfloor.

This operation traps if the divisor is zero, or if the result is not representable in B bits two’s complement. This only happens when x = -2^{B-1}, y = -1.

Inputs:

  • x: A scalar or vector integer type
  • y: A scalar or vector integer type

Outputs:

  • a: A scalar or vector integer type

pub fn urem(self, x: Value, y: Value) -> Value[src]

Unsigned integer remainder.

This operation traps if the divisor is zero.

Inputs:

  • x: A scalar or vector integer type
  • y: A scalar or vector integer type

Outputs:

  • a: A scalar or vector integer type

pub fn srem(self, x: Value, y: Value) -> Value[src]

Signed integer remainder. The result has the sign of the dividend.

This operation traps if the divisor is zero.

Inputs:

  • x: A scalar or vector integer type
  • y: A scalar or vector integer type

Outputs:

  • a: A scalar or vector integer type

pub fn iadd_imm<T1>(self, x: Value, Y: T1) -> Value where
    T1: Into<Imm64>, 
[src]

Add immediate integer.

Same as iadd, but one operand is an immediate constant.

Polymorphic over all scalar integer types, but does not support vector types.

Inputs:

  • x: A scalar integer type
  • Y: A 64-bit immediate integer.

Outputs:

  • a: A scalar integer type

pub fn imul_imm<T1>(self, x: Value, Y: T1) -> Value where
    T1: Into<Imm64>, 
[src]

Integer multiplication by immediate constant.

Polymorphic over all scalar integer types, but does not support vector types.

Inputs:

  • x: A scalar integer type
  • Y: A 64-bit immediate integer.

Outputs:

  • a: A scalar integer type
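
As a small example of the immediate forms, the following sketch computes x * 10 + 7 for a scalar integer, assuming a cranelift-frontend FunctionBuilder named builder; the helper name is hypothetical.

    use cranelift_codegen::ir::Value;
    use cranelift_frontend::FunctionBuilder;

    fn times_ten_plus_seven(builder: &mut FunctionBuilder, x: Value) -> Value {
        // Immediate operands are Imm64 values regardless of the operand type.
        let t = builder.ins().imul_imm(x, 10i64);
        builder.ins().iadd_imm(t, 7i64)
    }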

pub fn udiv_imm<T1>(self, x: Value, Y: T1) -> Value where
    T1: Into<Imm64>, 
[src]

Unsigned integer division by an immediate constant.

This operation traps if the divisor is zero.

Inputs:

  • x: A scalar integer type
  • Y: A 64-bit immediate integer.

Outputs:

  • a: A scalar integer type

pub fn sdiv_imm<T1>(self, x: Value, Y: T1) -> Value where
    T1: Into<Imm64>, 
[src]

Signed integer division by an immediate constant.

This operation traps if the divisor is zero, or if the result is not representable in B bits two’s complement. This only happens when x = -2^{B-1}, Y = -1.

Inputs:

  • x: A scalar integer type
  • Y: A 64-bit immediate integer.

Outputs:

  • a: A scalar integer type

pub fn urem_imm<T1>(self, x: Value, Y: T1) -> Value where
    T1: Into<Imm64>, 
[src]

Unsigned integer remainder with immediate divisor.

This operation traps if the divisor is zero.

Inputs:

  • x: A scalar integer type
  • Y: A 64-bit immediate integer.

Outputs:

  • a: A scalar integer type

pub fn srem_imm<T1>(self, x: Value, Y: T1) -> Value where
    T1: Into<Imm64>, 
[src]

Signed integer remainder with immediate divisor.

This operation traps if the divisor is zero.

Inputs:

  • x: A scalar integer type
  • Y: A 64-bit immediate integer.

Outputs:

  • a: A scalar integer type

pub fn irsub_imm<T1>(self, x: Value, Y: T1) -> Value where
    T1: Into<Imm64>, 
[src]

Immediate reverse wrapping subtraction: a := Y - x \pmod{2^B}.

Also works as integer negation when Y = 0. Use iadd_imm with a negative immediate operand for the reverse immediate subtraction.

Polymorphic over all scalar integer types, but does not support vector types.

Inputs:

  • x: A scalar integer type
  • Y: A 64-bit immediate integer.

Outputs:

  • a: A scalar integer type

pub fn iadd_cin(self, x: Value, y: Value, c_in: Value) -> Value[src]

Add integers with carry in.

Same as iadd with an additional carry input. Computes:

    a = x + y + c_{in} \pmod 2^B

Polymorphic over all scalar integer types, but does not support vector types.

Inputs:

  • x: A scalar integer type
  • y: A scalar integer type
  • c_in: Input carry flag

Outputs:

  • a: A scalar integer type

pub fn iadd_ifcin(self, x: Value, y: Value, c_in: Value) -> Value[src]

Add integers with carry in.

Same as iadd with an additional carry flag input. Computes:

    a = x + y + c_{in} \pmod 2^B

Polymorphic over all scalar integer types, but does not support vector types.

Inputs:

  • x: A scalar integer type
  • y: A scalar integer type
  • c_in: CPU flags representing the result of an integer comparison. These flags can be tested with an intcc condition code.

Outputs:

  • a: A scalar integer type

pub fn iadd_cout(self, x: Value, y: Value) -> (Value, Value)[src]

Add integers with carry out.

Same as iadd with an additional carry output.

    a &= x + y \pmod 2^B \\
    c_{out} &= x+y >= 2^B

Polymorphic over all scalar integer types, but does not support vector types.

Inputs:

  • x: A scalar integer type
  • y: A scalar integer type

Outputs:

  • a: A scalar integer type
  • c_out: Output carry flag

pub fn iadd_ifcout(self, x: Value, y: Value) -> (Value, Value)[src]

Add integers with carry out.

Same as iadd with an additional carry flag output.

    a &= x + y \pmod 2^B \\
    c_{out} &= x+y >= 2^B

Polymorphic over all scalar integer types, but does not support vector types.

Inputs:

  • x: A scalar integer type
  • y: A scalar integer type

Outputs:

  • a: A scalar integer type
  • c_out: CPU flags representing the result of an integer comparison. These flags can be tested with an intcc condition code.

pub fn iadd_carry(self, x: Value, y: Value, c_in: Value) -> (Value, Value)[src]

Add integers with carry in and out.

Same as iadd with an additional carry input and output.

    a &= x + y + c_{in} \pmod 2^B \\
    c_{out} &= x + y + c_{in} >= 2^B

Polymorphic over all scalar integer types, but does not support vector types.

Inputs:

  • x: A scalar integer type
  • y: A scalar integer type
  • c_in: Input carry flag

Outputs:

  • a: A scalar integer type
  • c_out: Output carry flag

pub fn iadd_ifcarry(self, x: Value, y: Value, c_in: Value) -> (Value, Value)[src]

Add integers with carry in and out.

Same as iadd with an additional carry flag input and output.

    a &= x + y + c_{in} \pmod 2^B \\
    c_{out} &= x + y + c_{in} >= 2^B

Polymorphic over all scalar integer types, but does not support vector types.

Inputs:

  • x: A scalar integer type
  • y: A scalar integer type
  • c_in: CPU flags representing the result of an integer comparison. These flags can be tested with an intcc condition code.

Outputs:

  • a: A scalar integer type
  • c_out: CPU flags representing the result of an integer comparison. These flags can be tested with an intcc condition code.
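
The carry-propagating variants compose into wider arithmetic. A sketch of a 128-bit addition built from i64 halves, assuming a cranelift-frontend FunctionBuilder named builder and that the carry value is the b1 flag described for iadd_cout above; the helper name is hypothetical.

    use cranelift_codegen::ir::Value;
    use cranelift_frontend::FunctionBuilder;

    // Returns the (low, high) halves of (x_hi:x_lo) + (y_hi:y_lo).
    fn add128(
        builder: &mut FunctionBuilder,
        x_lo: Value,
        x_hi: Value,
        y_lo: Value,
        y_hi: Value,
    ) -> (Value, Value) {
        // Low halves: wrapping add plus a carry-out flag.
        let (lo, carry) = builder.ins().iadd_cout(x_lo, y_lo);
        // High halves: fold the carry back in; the final carry is discarded.
        let (hi, _carry_out) = builder.ins().iadd_carry(x_hi, y_hi, carry);
        (lo, hi)
    }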

pub fn isub_bin(self, x: Value, y: Value, b_in: Value) -> Value[src]

Subtract integers with borrow in.

Same as isub with an additional borrow flag input. Computes:

    a = x - (y + b_{in}) \pmod 2^B

Polymorphic over all scalar integer types, but does not support vector types.

Inputs:

  • x: A scalar integer type
  • y: A scalar integer type
  • b_in: Input borrow flag

Outputs:

  • a: A scalar integer type

pub fn isub_ifbin(self, x: Value, y: Value, b_in: Value) -> Value[src]

Subtract integers with borrow in.

Same as isub with an additional borrow flag input. Computes:

    a = x - (y + b_{in}) \pmod 2^B

Polymorphic over all scalar integer types, but does not support vector types.

Inputs:

  • x: A scalar integer type
  • y: A scalar integer type
  • b_in: CPU flags representing the result of an integer comparison. These flags can be tested with an intcc condition code.

Outputs:

  • a: A scalar integer type

pub fn isub_bout(self, x: Value, y: Value) -> (Value, Value)[src]

Subtract integers with borrow out.

Same as isub with an additional borrow flag output.

    a &= x - y \pmod 2^B \\
    b_{out} &= x < y

Polymorphic over all scalar integer types, but does not support vector types.

Inputs:

  • x: A scalar integer type
  • y: A scalar integer type

Outputs:

  • a: A scalar integer type
  • b_out: Output borrow flag

pub fn isub_ifbout(self, x: Value, y: Value) -> (Value, Value)[src]

Subtract integers with borrow out.

Same as isub with an additional borrow flag output.

    a &= x - y \pmod 2^B \\
    b_{out} &= x < y

Polymorphic over all scalar integer types, but does not support vector types.

Inputs:

  • x: A scalar integer type
  • y: A scalar integer type

Outputs:

  • a: A scalar integer type
  • b_out: CPU flags representing the result of an integer comparison. These flags can be tested with an intcc condition code.

pub fn isub_borrow(self, x: Value, y: Value, b_in: Value) -> (Value, Value)[src]

Subtract integers with borrow in and out.

Same as isub with an additional borrow flag input and output.

    a &= x - (y + b_{in}) \pmod 2^B \\
    b_{out} &= x < y + b_{in}

Polymorphic over all scalar integer types, but does not support vector types.

Inputs:

  • x: A scalar integer type
  • y: A scalar integer type
  • b_in: Input borrow flag

Outputs:

  • a: A scalar integer type
  • b_out: Output borrow flag

pub fn isub_ifborrow(self, x: Value, y: Value, b_in: Value) -> (Value, Value)[src]

Subtract integers with borrow in and out.

Same as isub with an additional borrow flag input and output.

    a &= x - (y + b_{in}) \pmod 2^B \\
    b_{out} &= x < y + b_{in}

Polymorphic over all scalar integer types, but does not support vector types.

Inputs:

  • x: A scalar integer type
  • y: A scalar integer type
  • b_in: CPU flags representing the result of an integer comparison. These flags can be tested with an intcc condition code.

Outputs:

  • a: A scalar integer type
  • b_out: CPU flags representing the result of an integer comparison. These flags can be tested with an intcc condition code.

pub fn band(self, x: Value, y: Value) -> Value[src]

Bitwise and.

Inputs:

  • x: Any integer, float, or boolean scalar or vector type
  • y: Any integer, float, or boolean scalar or vector type

Outputs:

  • a: Any integer, float, or boolean scalar or vector type

pub fn bor(self, x: Value, y: Value) -> Value[src]

Bitwise or.

Inputs:

  • x: Any integer, float, or boolean scalar or vector type
  • y: Any integer, float, or boolean scalar or vector type

Outputs:

  • a: Any integer, float, or boolean scalar or vector type

pub fn bxor(self, x: Value, y: Value) -> Value[src]

Bitwise xor.

Inputs:

  • x: Any integer, float, or boolean scalar or vector type
  • y: Any integer, float, or boolean scalar or vector type

Outputs:

  • a: Any integer, float, or boolean scalar or vector type

pub fn bnot(self, x: Value) -> Value[src]

Bitwise not.

Inputs:

  • x: Any integer, float, or boolean scalar or vector type

Outputs:

  • a: Any integer, float, or boolean scalar or vector type

pub fn band_not(self, x: Value, y: Value) -> Value[src]

Bitwise and not.

Computes x & ~y.

Inputs:

  • x: Any integer, float, or boolean scalar or vector type
  • y: Any integer, float, or boolean scalar or vector type

Outputs:

  • a: Any integer, float, or boolean scalar or vector type

pub fn bor_not(self, x: Value, y: Value) -> Value[src]

Bitwise or not.

Computes x | ~y.

Inputs:

  • x: Any integer, float, or boolean scalar or vector type
  • y: Any integer, float, or boolean scalar or vector type

Outputs:

  • a: Any integer, float, or boolean scalar or vector type

pub fn bxor_not(self, x: Value, y: Value) -> Value[src]

Bitwise xor not.

Computes x ^ ~y.

Inputs:

  • x: Any integer, float, or boolean scalar or vector type
  • y: Any integer, float, or boolean scalar or vector type

Outputs:

  • a: Any integer, float, or boolean scalar or vector type

pub fn band_imm<T1>(self, x: Value, Y: T1) -> Value where
    T1: Into<Imm64>, 
[src]

Bitwise and with immediate.

Same as band, but one operand is an immediate constant.

Polymorphic over all scalar integer types, but does not support vector types.

Inputs:

  • x: A scalar integer type
  • Y: A 64-bit immediate integer.

Outputs:

  • a: A scalar integer type

pub fn bor_imm<T1>(self, x: Value, Y: T1) -> Value where
    T1: Into<Imm64>, 
[src]

Bitwise or with immediate.

Same as bor, but one operand is an immediate constant.

Polymorphic over all scalar integer types, but does not support vector types.

Inputs:

  • x: A scalar integer type
  • Y: A 64-bit immediate integer.

Outputs:

  • a: A scalar integer type

pub fn bxor_imm<T1>(self, x: Value, Y: T1) -> Value where
    T1: Into<Imm64>, 
[src]

Bitwise xor with immediate.

Same as bxor, but one operand is an immediate constant.

Polymorphic over all scalar integer types, but does not support vector types.

Inputs:

  • x: A scalar integer type
  • Y: A 64-bit immediate integer.

Outputs:

  • a: A scalar integer type

pub fn rotl(self, x: Value, y: Value) -> Value[src]

Rotate left.

Rotate the bits in x by y places.

Inputs:

  • x: Scalar or vector value to shift
  • y: Number of bits to shift

Outputs:

  • a: A scalar or vector integer type

pub fn rotr(self, x: Value, y: Value) -> Value[src]

Rotate right.

Rotate the bits in x by y places.

Inputs:

  • x: Scalar or vector value to shift
  • y: Number of bits to shift

Outputs:

  • a: A scalar or vector integer type

pub fn rotl_imm<T1>(self, x: Value, Y: T1) -> Value where
    T1: Into<Imm64>, 
[src]

Rotate left by immediate.

Inputs:

  • x: Scalar or vector value to shift
  • Y: A 64-bit immediate integer.

Outputs:

  • a: A scalar or vector integer type

pub fn rotr_imm<T1>(self, x: Value, Y: T1) -> Value where
    T1: Into<Imm64>, 
[src]

Rotate right by immediate.

Inputs:

  • x: Scalar or vector value to shift
  • Y: A 64-bit immediate integer.

Outputs:

  • a: A scalar or vector integer type

pub fn ishl(self, x: Value, y: Value) -> Value[src]

Integer shift left. Shift the bits in x towards the MSB by y places. Shift in zero bits to the LSB.

The shift amount is masked to the size of x.

When shifting a B-bits integer type, this instruction computes:

    s &:= y \pmod B,
    a &:= x \cdot 2^s \pmod{2^B}.

Inputs:

  • x: Scalar or vector value to shift
  • y: Number of bits to shift

Outputs:

  • a: A scalar or vector integer type
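
For example, shifting an i32 value by y = 35 actually shifts by s = 35 mod 32 = 3, so the result is a = x * 2^3 mod 2^32.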

pub fn ushr(self, x: Value, y: Value) -> Value[src]

Unsigned shift right. Shift bits in x towards the LSB by y places, shifting in zero bits to the MSB. Also called a logical shift.

The shift amount is masked to the size of the register.

When shifting a B-bits integer type, this instruction computes:

    s &:= y \pmod B,
    a &:= \lfloor x \cdot 2^{-s} \rfloor.

Inputs:

  • x: Scalar or vector value to shift
  • y: Number of bits to shift

Outputs:

  • a: A scalar or vector integer type

pub fn sshr(self, x: Value, y: Value) -> Value[src]

Signed shift right. Shift bits in x towards the LSB by y places, shifting in sign bits to the MSB. Also called an arithmetic shift.

The shift amount is masked to the size of the register.

Inputs:

  • x: Scalar or vector value to shift
  • y: Number of bits to shift

Outputs:

  • a: A scalar or vector integer type

pub fn ishl_imm<T1>(self, x: Value, Y: T1) -> Value where
    T1: Into<Imm64>, 
[src]

Integer shift left by immediate.

The shift amount is masked to the size of x.

Inputs:

  • x: Scalar or vector value to shift
  • Y: A 64-bit immediate integer.

Outputs:

  • a: A scalar or vector integer type

pub fn ushr_imm<T1>(self, x: Value, Y: T1) -> Value where
    T1: Into<Imm64>, 
[src]

Unsigned shift right by immediate.

The shift amount is masked to the size of the register.

Inputs:

  • x: Scalar or vector value to shift
  • Y: A 64-bit immediate integer.

Outputs:

  • a: A scalar or vector integer type

pub fn sshr_imm<T1>(self, x: Value, Y: T1) -> Value where
    T1: Into<Imm64>, 
[src]

Signed shift right by immediate.

The shift amount is masked to the size of the register.

Inputs:

  • x: Scalar or vector value to shift
  • Y: A 64-bit immediate integer.

Outputs:

  • a: A scalar or vector integer type

pub fn bitrev(self, x: Value) -> Value[src]

Reverse the bits of an integer.

Reverses the bits in x.

Inputs:

  • x: A scalar integer type

Outputs:

  • a: A scalar integer type

pub fn clz(self, x: Value) -> Value[src]

Count leading zero bits.

Starting from the MSB in x, count the number of zero bits before reaching the first one bit. When x is zero, returns the size of x in bits.

Inputs:

  • x: A scalar integer type

Outputs:

  • a: A scalar integer type

pub fn cls(self, x: Value) -> Value[src]

Count leading sign bits.

Starting from the MSB after the sign bit in x, count the number of consecutive bits identical to the sign bit. When x is 0 or -1, returns one less than the size of x in bits.

Inputs:

  • x: A scalar integer type

Outputs:

  • a: A scalar integer type

pub fn ctz(self, x: Value) -> Value[src]

Count trailing zeros.

Starting from the LSB in x, count the number of zero bits before reaching the first one bit. When x is zero, returns the size of x in bits.

Inputs:

  • x: A scalar integer type

Outputs:

  • a: A scalar integer type

pub fn popcnt(self, x: Value) -> Value[src]

Population count.

Count the number of one bits in x.

Inputs:

  • x: A scalar integer type

Outputs:

  • a: A scalar integer type
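
As a worked example, for the i32 value 0x0000_00F0 (bits 4 through 7 set): clz returns 24, cls returns 23, ctz returns 4, and popcnt returns 4.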

pub fn fcmp<T1>(self, Cond: T1, x: Value, y: Value) -> Value where
    T1: Into<FloatCC>, 
[src]

Floating point comparison.

Two IEEE 754-2008 floating point numbers, x and y, relate to each other in exactly one of four ways:

    UN  Unordered when one or both numbers is NaN.
    EQ  When x = y. (And 0.0 = -0.0).
    LT  When x < y.
    GT  When x > y.

The 14 floatcc condition codes each correspond to a subset of the four relations, except for the empty set which would always be false, and the full set which would always be true.

The condition codes are divided into 7 ‘ordered’ conditions which don’t include UN, and 7 unordered conditions which all include UN.

    Ordered              Unordered               Condition
    ord  EQ | LT | GT    uno  UN                 NaNs absent / present.
    eq   EQ              ueq  UN | EQ            Equal
    one  LT | GT         ne   UN | LT | GT       Not equal
    lt   LT              ult  UN | LT            Less than
    le   LT | EQ         ule  UN | LT | EQ       Less than or equal
    gt   GT              ugt  UN | GT            Greater than
    ge   GT | EQ         uge  UN | GT | EQ       Greater than or equal

The standard C comparison operators, <, <=, >, >=, are all ordered, so they are false if either operand is NaN. The C equality operator, ==, is ordered, and since inequality is defined as the logical inverse it is unordered. They map to the floatcc condition codes as follows:

    C     Cond  Subset
    ==    eq    EQ
    !=    ne    UN | LT | GT
    <     lt    LT
    <=    le    LT | EQ
    >     gt    GT
    >=    ge    GT | EQ

This subset of condition codes also corresponds to the WebAssembly floating point comparisons of the same name.

When this instruction compares floating point vectors, it returns a boolean vector with the results of lane-wise comparisons.

Inputs:

  • Cond: A floating point comparison condition code
  • x: A scalar or vector floating point number
  • y: A scalar or vector floating point number

Outputs:

  • a:
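
An ordered comparison feeding select gives a branch-free clamp that lets NaN pass through, since lt is an ordered condition and is therefore false for NaN operands. A sketch, assuming a cranelift-frontend FunctionBuilder named builder; the helper name is hypothetical.

    use cranelift_codegen::ir::{condcodes::FloatCC, Value};
    use cranelift_frontend::FunctionBuilder;

    // Clamp negative f64 values to zero; NaN inputs are returned unchanged.
    fn clamp_negative_to_zero(builder: &mut FunctionBuilder, x: Value) -> Value {
        let zero = builder.ins().f64const(0.0f64);
        // Ordered less-than: false when either operand is NaN.
        let is_neg = builder.ins().fcmp(FloatCC::LessThan, x, zero);
        builder.ins().select(is_neg, zero, x)
    }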

pub fn ffcmp(self, x: Value, y: Value) -> Value[src]

Floating point comparison returning flags.

Compares two numbers like fcmp, but returns floating point CPU flags instead of testing a specific condition.

Inputs:

  • x: A scalar or vector floating point number
  • y: A scalar or vector floating point number

Outputs:

  • f: CPU flags representing the result of a floating point comparison. These flags can be tested with a floatcc condition code.

pub fn fadd(self, x: Value, y: Value) -> Value[src]

Floating point addition.

Inputs:

  • x: A scalar or vector floating point number
  • y: A scalar or vector floating point number

Outputs:

  • a: Result of applying operator to each lane

pub fn fsub(self, x: Value, y: Value) -> Value[src]

Floating point subtraction.

Inputs:

  • x: A scalar or vector floating point number
  • y: A scalar or vector floating point number

Outputs:

  • a: Result of applying operator to each lane

pub fn fmul(self, x: Value, y: Value) -> Value[src]

Floating point multiplication.

Inputs:

  • x: A scalar or vector floating point number
  • y: A scalar or vector floating point number

Outputs:

  • a: Result of applying operator to each lane

pub fn fdiv(self, x: Value, y: Value) -> Value[src]

Floating point division.

Unlike the integer division instructions sdiv and udiv, this can't trap. Division by zero is infinity or NaN, depending on the dividend.

Inputs:

  • x: A scalar or vector floating point number
  • y: A scalar or vector floating point number

Outputs:

  • a: Result of applying operator to each lane

pub fn sqrt(self, x: Value) -> Value[src]

Floating point square root.

Inputs:

  • x: A scalar or vector floating point number

Outputs:

  • a: Result of applying operator to each lane

pub fn fma(self, x: Value, y: Value, z: Value) -> Value[src]

Floating point fused multiply-and-add.

Computes a := xy+z without any intermediate rounding of the product.

Inputs:

  • x: A scalar or vector floating point number
  • y: A scalar or vector floating point number
  • z: A scalar or vector floating point number

Outputs:

  • a: Result of applying operator to each lane

pub fn fneg(self, x: Value) -> Value[src]

Floating point negation.

Note that this is a pure bitwise operation.

Inputs:

  • x: A scalar or vector floating point number

Outputs:

  • a: x with its sign bit inverted

pub fn fabs(self, x: Value) -> Value[src]

Floating point absolute value.

Note that this is a pure bitwise operation.

Inputs:

  • x: A scalar or vector floating point number

Outputs:

  • a: x with its sign bit cleared

pub fn fcopysign(self, x: Value, y: Value) -> Value[src]

Floating point copy sign.

Note that this is a pure bitwise operation. The sign bit from y is copied to the sign bit of x.

Inputs:

  • x: A scalar or vector floating point number
  • y: A scalar or vector floating point number

Outputs:

  • a: x with its sign bit changed to that of y

pub fn fmin(self, x: Value, y: Value) -> Value[src]

Floating point minimum, propagating NaNs.

If either operand is NaN, this returns a NaN.

Inputs:

  • x: A scalar or vector floating point number
  • y: A scalar or vector floating point number

Outputs:

  • a: The smaller of x and y

pub fn fmin_pseudo(self, x: Value, y: Value) -> Value[src]

Floating point pseudo-minimum, propagating NaNs. This behaves differently from fmin. See https://github.com/WebAssembly/simd/pull/122 for background.

The behaviour is defined as fmin_pseudo(a, b) = (b < a) ? b : a, and the behaviour for zero or NaN inputs follows from the behaviour of < with such inputs.

Inputs:

  • x: A scalar or vector floating point number
  • y: A scalar or vector floating point number

Outputs:

  • a: The smaller of x and y

pub fn fmax(self, x: Value, y: Value) -> Value[src]

Floating point maximum, propagating NaNs.

If either operand is NaN, this returns a NaN.

Inputs:

  • x: A scalar or vector floating point number
  • y: A scalar or vector floating point number

Outputs:

  • a: The larger of x and y

pub fn fmax_pseudo(self, x: Value, y: Value) -> Value[src]

Floating point pseudo-maximum, propagating NaNs. This behaves differently from fmax. See https://github.com/WebAssembly/simd/pull/122 for background.

The behaviour is defined as fmax_pseudo(a, b) = (a < b) ? b : a, and the behaviour for zero or NaN inputs follows from the behaviour of < with such inputs.

Inputs:

  • x: A scalar or vector floating point number
  • y: A scalar or vector floating point number

Outputs:

  • a: The larger of x and y

pub fn ceil(self, x: Value) -> Value[src]

Round floating point to an integral value, towards positive infinity.

Inputs:

  • x: A scalar or vector floating point number

Outputs:

  • a: x rounded to integral value

pub fn floor(self, x: Value) -> Value[src]

Round floating point to an integral value, towards negative infinity.

Inputs:

  • x: A scalar or vector floating point number

Outputs:

  • a: x rounded to integral value

pub fn trunc(self, x: Value) -> Value[src]

Round floating point to an integral value, towards zero.

Inputs:

  • x: A scalar or vector floating point number

Outputs:

  • a: x rounded to integral value

pub fn nearest(self, x: Value) -> Value[src]

Round floating point to an integral value, towards nearest with ties to even.

Inputs:

  • x: A scalar or vector floating point number

Outputs:

  • a: x rounded to integral value

pub fn is_null(self, x: Value) -> Value[src]

Reference verification.

The condition code determines if the reference type in question is null or not.

Inputs:

  • x: A scalar reference type

Outputs:

  • a: A boolean type with 1 bits.

pub fn is_invalid(self, x: Value) -> Value[src]

Reference verification.

The condition code determines if the reference type in question is invalid or not.

Inputs:

  • x: A scalar reference type

Outputs:

  • a: A boolean type with 1 bits.

pub fn trueif<T1>(self, Cond: T1, f: Value) -> Value where
    T1: Into<IntCC>, 
[src]

Test integer CPU flags for a specific condition.

Check the CPU flags in f against the Cond condition code and return true when the condition code is satisfied.

Inputs:

  • Cond: An integer comparison condition code.
  • f: CPU flags representing the result of an integer comparison. These flags can be tested with an intcc condition code.

Outputs:

  • a: A boolean type with 1 bit.

pub fn trueff<T1>(self, Cond: T1, f: Value) -> Value where
    T1: Into<FloatCC>, 
[src]

Test floating point CPU flags for a specific condition.

Check the CPU flags in f against the Cond condition code and return true when the condition code is satisfied.

Inputs:

  • Cond: A floating point comparison condition code
  • f: CPU flags representing the result of a floating point comparison. These flags can be tested with a floatcc condition code.

Outputs:

  • a: A boolean type with 1 bit.

pub fn bitcast(self, MemTo: Type, x: Value) -> Value[src]

Reinterpret the bits in x as a different type.

The input and output types must be storable to memory and of the same size. A bitcast is equivalent to storing one type and loading the other type from the same address.

Inputs:

  • MemTo (controlling type variable):
  • x: Any type that can be stored in memory

Outputs:

  • a: Bits of x reinterpreted

pub fn raw_bitcast(self, AnyTo: Type, x: Value) -> Value[src]

Cast the bits in x as a different type of the same bit width.

This instruction does not change the data’s representation but allows data in registers to be used as different types, e.g. an i32x4 as a b8x16. The only constraint on the result a is that it can be raw_bitcast back to the original type. Also, in a raw_bitcast between vector types with the same number of lanes, the value of each result lane is a raw_bitcast of the corresponding operand lane. TODO there is currently no mechanism for enforcing the bit width constraint.

Inputs:

  • AnyTo (controlling type variable):
  • x: Any integer, float, boolean, or reference scalar or vector type

Outputs:

  • a: Bits of x reinterpreted
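
To illustrate the difference in usage, the sketch below reinterprets a scalar and a vector without changing any bits. It assumes a cranelift_frontend::FunctionBuilder named builder, an f32 value f and an i32x4 value v; types refers to the ir::types module of type constants.

    use cranelift_codegen::ir::{types, Value};
    use cranelift_frontend::FunctionBuilder;

    fn reinterpret(builder: &mut FunctionBuilder, f: Value, v: Value) -> (Value, Value) {
        // Same-size scalar reinterpretation: the f32 bit pattern viewed as an i32.
        let bits = builder.ins().bitcast(types::I32, f);
        // Same-width vector reinterpretation: i32x4 lanes viewed as f32x4 lanes.
        let as_floats = builder.ins().raw_bitcast(types::F32X4, v);
        (bits, as_floats)
    }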

pub fn scalar_to_vector(self, TxN: Type, s: Value) -> Value[src]

Copies a scalar value to a vector value. The scalar is copied into the least significant lane of the vector, and all other lanes will be zero.

Inputs:

  • TxN (controlling type variable): A SIMD vector type
  • s: A scalar value

Outputs:

  • a: A vector value

pub fn breduce(self, BoolTo: Type, x: Value) -> Value[src]

Convert x to a smaller boolean type in the platform-defined way.

The result type must have the same number of vector lanes as the input, and each lane must not have more bits than the input lanes. If the input and output types are the same, this is a no-op.

Inputs:

  • BoolTo (controlling type variable): A smaller boolean type with the same number of lanes
  • x: A scalar or vector boolean type

Outputs:

  • a: A smaller boolean type with the same number of lanes

pub fn bextend(self, BoolTo: Type, x: Value) -> Value[src]

Convert x to a larger boolean type in the platform-defined way.

The result type must have the same number of vector lanes as the input, and each lane must not have fewer bits than the input lanes. If the input and output types are the same, this is a no-op.

Inputs:

  • BoolTo (controlling type variable): A larger boolean type with the same number of lanes
  • x: A scalar or vector boolean type

Outputs:

  • a: A larger boolean type with the same number of lanes

pub fn bint(self, IntTo: Type, x: Value) -> Value[src]

Convert x to an integer.

True maps to 1 and false maps to 0. The result type must have the same number of vector lanes as the input.

Inputs:

  • IntTo (controlling type variable): An integer type with the same number of lanes
  • x: A scalar or vector boolean type

Outputs:

  • a: An integer type with the same number of lanes

pub fn bmask(self, IntTo: Type, x: Value) -> Value[src]

Convert x to an integer mask.

True maps to all 1s and false maps to all 0s. The result type must have the same number of vector lanes as the input.

Inputs:

  • IntTo (controlling type variable): An integer type with the same number of lanes
  • x: A scalar or vector boolean type

Outputs:

  • a: An integer type with the same number of lanes
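
A small sketch of turning a comparison result into integers with bint and bmask; it assumes a cranelift_frontend::FunctionBuilder named builder, i32 values a and b, and uses icmp, another method of this trait.

    use cranelift_codegen::ir::{condcodes::IntCC, types, Value};
    use cranelift_frontend::FunctionBuilder;

    fn compare_to_ints(builder: &mut FunctionBuilder, a: Value, b: Value) -> (Value, Value) {
        // Boolean result of a signed less-than comparison.
        let is_lt = builder.ins().icmp(IntCC::SignedLessThan, a, b);
        // bint: true -> 1, false -> 0.
        let as_zero_one = builder.ins().bint(types::I32, is_lt);
        // bmask: true -> all ones, false -> all zeros.
        let as_mask = builder.ins().bmask(types::I32, is_lt);
        (as_zero_one, as_mask)
    }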

pub fn ireduce(self, IntTo: Type, x: Value) -> Value[src]

Convert x to a smaller integer type by dropping high bits.

Each lane in x is converted to a smaller integer type by discarding the most significant bits. This is the same as reducing modulo 2^n.

The result type must have the same number of vector lanes as the input, and each lane must not have more bits than the input lanes. If the input and output types are the same, this is a no-op.

Inputs:

  • IntTo (controlling type variable): A smaller integer type with the same number of lanes
  • x: A scalar or vector integer type

Outputs:

  • a: A smaller integer type with the same number of lanes

pub fn snarrow(self, x: Value, y: Value) -> Value[src]

Combine x and y into a vector with twice the lanes but half the integer width while saturating overflowing values to the signed maximum and minimum.

The lanes will be concatenated after narrowing. For example, when x and y are i32x4 and x = [x3, x2, x1, x0] and y = [y3, y2, y1, y0], then after narrowing the value returned is an i16x8: a = [y3', y2', y1', y0', x3', x2', x1', x0'].

Inputs:

  • x: A SIMD vector type containing integer lanes 16 or 32 bits wide
  • y: A SIMD vector type containing integer lanes 16 or 32 bits wide

Outputs:

  • a:

pub fn unarrow(self, x: Value, y: Value) -> Value[src]

Combine x and y into a vector with twice the lanes but half the integer width while saturating overflowing values to the unsigned maximum and minimum.

Note that all input lanes are considered signed: any negative lanes will overflow and be replaced with the unsigned minimum, 0x00.

The lanes will be concatenated after narrowing. For example, when x and y are i32x4 and x = [x3, x2, x1, x0] and y = [y3, y2, y1, y0], then after narrowing the value returned is an i16x8: a = [y3', y2', y1', y0', x3', x2', x1', x0'].

Inputs:

  • x: A SIMD vector type containing integer lanes 16 or 32 bits wide
  • y: A SIMD vector type containing integer lanes 16 or 32 bits wide

Outputs:

  • a:
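
For illustration, the sketch below narrows two i32x4 vectors into a single i16x8 vector with signed and unsigned saturation. It assumes a cranelift_frontend::FunctionBuilder named builder and i32x4 values x and y.

    use cranelift_codegen::ir::Value;
    use cranelift_frontend::FunctionBuilder;

    fn narrow_pair(builder: &mut FunctionBuilder, x: Value, y: Value) -> (Value, Value) {
        // Saturates each lane to the i16 range; lanes of y land in the high half.
        let signed = builder.ins().snarrow(x, y);
        // Saturates each lane to the u16 range; negative input lanes become 0.
        let unsigned = builder.ins().unarrow(x, y);
        (signed, unsigned)
    }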

pub fn swiden_low(self, x: Value) -> Value[src]

Widen the low lanes of x using signed extension.

This will double the lane width and halve the number of lanes.

Inputs:

  • x: A SIMD vector type containing integer lanes 8 or 16 bits wide.

Outputs:

  • a:

pub fn swiden_high(self, x: Value) -> Value[src]

Widen the high lanes of x using signed extension.

This will double the lane width and halve the number of lanes.

Inputs:

  • x: A SIMD vector type containing integer lanes 8 or 16 bits wide.

Outputs:

  • a:

pub fn uwiden_low(self, x: Value) -> Value[src]

Widen the low lanes of x using unsigned extension.

This will double the lane width and halve the number of lanes.

Inputs:

  • x: A SIMD vector type containing integer lanes 8 or 16 bits wide.

Outputs:

  • a:

pub fn uwiden_high(self, x: Value) -> Value[src]

Widen the high lanes of x using unsigned extension.

This will double the lane width and halve the number of lanes.

Inputs:

  • x: A SIMD vector type containing integer lanes 8 or 16 bits wide.

Outputs:

  • a:

pub fn widening_pairwise_dot_product_s(self, x: Value, y: Value) -> Value[src]

Takes corresponding elements in x and y, performs a sign-extending length-doubling multiplication on them, then adds adjacent pairs of elements to form the result. For example, if the input vectors are [x3, x2, x1, x0] and [y3, y2, y1, y0], it produces the vector [r1, r0], where r1 = sx(x3) * sx(y3) + sx(x2) * sx(y2) and r0 = sx(x1) * sx(y1) + sx(x0) * sx(y0), and sx(n) sign-extends n to twice its width.

This will double the lane width and halve the number of lanes. So the resulting vector has the same number of bits as x and y do (individually).

See https://github.com/WebAssembly/simd/pull/127 for background info.

Inputs:

  • x: A SIMD vector type containing 8 integer lanes each 16 bits wide.
  • y: A SIMD vector type containing 8 integer lanes each 16 bits wide.

Outputs:

  • a:
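
The sketch below widens an i16x8 vector into its two i32x4 halves and forms the widening pairwise dot product of two i16x8 vectors; it assumes a cranelift_frontend::FunctionBuilder named builder and i16x8 values x and y.

    use cranelift_codegen::ir::Value;
    use cranelift_frontend::FunctionBuilder;

    fn widen_and_dot(builder: &mut FunctionBuilder, x: Value, y: Value) -> (Value, Value, Value) {
        // The low four lanes of x, sign-extended to an i32x4.
        let low = builder.ins().swiden_low(x);
        // The high four lanes of x, sign-extended to an i32x4.
        let high = builder.ins().swiden_high(x);
        // i16x8 * i16x8 -> i32x4, adding adjacent products.
        let dot = builder.ins().widening_pairwise_dot_product_s(x, y);
        (low, high, dot)
    }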

pub fn uextend(self, IntTo: Type, x: Value) -> Value[src]

Convert x to a larger integer type by zero-extending.

Each lane in x is converted to a larger integer type by adding zeroes. The result has the same numerical value as x when both are interpreted as unsigned integers.

The result type must have the same number of vector lanes as the input, and each lane must not have fewer bits than the input lanes. If the input and output types are the same, this is a no-op.

Inputs:

  • IntTo (controlling type variable): A larger integer type with the same number of lanes
  • x: A scalar or vector integer type

Outputs:

  • a: A larger integer type with the same number of lanes

pub fn sextend(self, IntTo: Type, x: Value) -> Value[src]

Convert x to a larger integer type by sign-extending.

Each lane in x is converted to a larger integer type by replicating the sign bit. The result has the same numerical value as x when both are interpreted as signed integers.

The result type must have the same number of vector lanes as the input, and each lane must not have fewer bits than the input lanes. If the input and output types are the same, this is a no-op.

Inputs:

  • IntTo (controlling type variable): A larger integer type with the same number of lanes
  • x: A scalar or vector integer type

Outputs:

  • a: A larger integer type with the same number of lanes
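
Combined with ireduce (documented above), these two instructions move values between integer widths. A minimal sketch, assuming a cranelift_frontend::FunctionBuilder named builder and an i64 value x:

    use cranelift_codegen::ir::{types, Value};
    use cranelift_frontend::FunctionBuilder;

    fn change_width(builder: &mut FunctionBuilder, x: Value) -> (Value, Value, Value) {
        // Drop the high 32 bits (reduce modulo 2^32).
        let low32 = builder.ins().ireduce(types::I32, x);
        // Widen back to 64 bits, filling with zeroes and with the sign bit respectively.
        let zext = builder.ins().uextend(types::I64, low32);
        let sext = builder.ins().sextend(types::I64, low32);
        (low32, zext, sext)
    }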

pub fn fpromote(self, FloatTo: Type, x: Value) -> Value[src]

Convert x to a larger floating point format.

Each lane in x is converted to the destination floating point format. This is an exact operation.

Cranelift currently only supports two floating point formats, f32 and f64. This may change in the future.

The result type must have the same number of vector lanes as the input, and the result lanes must not have fewer bits than the input lanes. If the input and output types are the same, this is a no-op.

Inputs:

  • FloatTo (controlling type variable): A scalar or vector floating point number
  • x: A scalar or vector floating point number

Outputs:

  • a: A scalar or vector floating point number

pub fn fdemote(self, FloatTo: Type, x: Value) -> Value[src]

Convert x to a smaller floating point format.

Each lane in x is converted to the destination floating point format by rounding to nearest, ties to even.

Cranelift currently only supports two floating point formats, f32 and f64. This may change in the future.

The result type must have the same number of vector lanes as the input, and the result lanes must not have more bits than the input lanes. If the input and output types are the same, this is a no-op.

Inputs:

  • FloatTo (controlling type variable): A scalar or vector floating point number
  • x: A scalar or vector floating point number

Outputs:

  • a: A scalar or vector floating point number
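
A minimal sketch of converting between the two supported float formats, assuming a cranelift_frontend::FunctionBuilder named builder and an f32 value x:

    use cranelift_codegen::ir::{types, Value};
    use cranelift_frontend::FunctionBuilder;

    fn promote_then_demote(builder: &mut FunctionBuilder, x: Value) -> (Value, Value) {
        // Exact: every f32 value is representable as an f64.
        let wide = builder.ins().fpromote(types::F64, x);
        // Rounds to nearest, ties to even, when narrowing back to f32.
        let narrow = builder.ins().fdemote(types::F32, wide);
        (wide, narrow)
    }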

pub fn fcvt_to_uint(self, IntTo: Type, x: Value) -> Value[src]

Convert floating point to unsigned integer.

Each lane in x is converted to an unsigned integer by rounding towards zero. If x is NaN or if the unsigned integral value cannot be represented in the result type, this instruction traps.

The result type must have the same number of vector lanes as the input.

Inputs:

  • IntTo (controlling type variable): A larger integer type with the same number of lanes
  • x: A scalar or vector floating point number

Outputs:

  • a: A larger integer type with the same number of lanes

pub fn fcvt_to_uint_sat(self, IntTo: Type, x: Value) -> Value[src]

Convert floating point to unsigned integer as fcvt_to_uint does, but saturates the input instead of trapping. NaN and negative values are converted to 0.

Inputs:

  • IntTo (controlling type variable): A larger integer type with the same number of lanes
  • x: A scalar or vector floating point number

Outputs:

  • a: A larger integer type with the same number of lanes

pub fn fcvt_to_sint(self, IntTo: Type, x: Value) -> Value[src]

Convert floating point to signed integer.

Each lane in x is converted to a signed integer by rounding towards zero. If x is NaN or if the signed integral value cannot be represented in the result type, this instruction traps.

The result type must have the same number of vector lanes as the input.

Inputs:

  • IntTo (controlling type variable): A larger integer type with the same number of lanes
  • x: A scalar or vector floating point number

Outputs:

  • a: A larger integer type with the same number of lanes

pub fn fcvt_to_sint_sat(self, IntTo: Type, x: Value) -> Value[src]

Convert floating point to signed integer as fcvt_to_sint does, but saturates the input instead of trapping. NaN values are converted to 0.

Inputs:

  • IntTo (controlling type variable): A larger integer type with the same number of lanes
  • x: A scalar or vector floating point number

Outputs:

  • a: A larger integer type with the same number of lanes
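
The sketch below contrasts the trapping and saturating float-to-int conversions; it assumes a cranelift_frontend::FunctionBuilder named builder and an f64 value x.

    use cranelift_codegen::ir::{types, Value};
    use cranelift_frontend::FunctionBuilder;

    fn float_to_i32(builder: &mut FunctionBuilder, x: Value) -> (Value, Value) {
        // Traps on NaN or when the value does not fit in an i32.
        let trapping = builder.ins().fcvt_to_sint(types::I32, x);
        // Never traps: NaN becomes 0, out-of-range values clamp to i32::MIN / i32::MAX.
        let saturating = builder.ins().fcvt_to_sint_sat(types::I32, x);
        (trapping, saturating)
    }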

pub fn fcvt_from_uint(self, FloatTo: Type, x: Value) -> Value[src]

Convert unsigned integer to floating point.

Each lane in x is interpreted as an unsigned integer and converted to floating point using round to nearest, ties to even.

The result type must have the same number of vector lanes as the input.

Inputs:

  • FloatTo (controlling type variable): A scalar or vector floating point number
  • x: A scalar or vector integer type

Outputs:

  • a: A scalar or vector floating point number

pub fn fcvt_from_sint(self, FloatTo: Type, x: Value) -> Value[src]

Convert signed integer to floating point.

Each lane in x is interpreted as a signed integer and converted to floating point using round to nearest, ties to even.

The result type must have the same number of vector lanes as the input.

Inputs:

  • FloatTo (controlling type variable): A scalar or vector floating point number
  • x: A scalar or vector integer type

Outputs:

  • a: A scalar or vector floating point number
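
For the opposite direction, a minimal sketch converting the same i32 bits to f64 under signed and unsigned interpretation, assuming a cranelift_frontend::FunctionBuilder named builder and an i32 value x:

    use cranelift_codegen::ir::{types, Value};
    use cranelift_frontend::FunctionBuilder;

    fn int_to_f64(builder: &mut FunctionBuilder, x: Value) -> (Value, Value) {
        // Interprets x as signed; rounds to nearest, ties to even.
        let from_signed = builder.ins().fcvt_from_sint(types::F64, x);
        // Interprets the same bits as unsigned.
        let from_unsigned = builder.ins().fcvt_from_uint(types::F64, x);
        (from_signed, from_unsigned)
    }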

pub fn isplit(self, x: Value) -> (Value, Value)[src]

Split an integer into low and high parts.

Vectors of integers are split lane-wise, so the results have the same number of lanes as the input, but the lanes are half the size.

Returns the low half of x and the high half of x as two independent values.

Inputs:

  • x: An integer type with lanes from i16 upwards

Outputs:

  • lo: The low bits of x
  • hi: The high bits of x

pub fn iconcat(self, lo: Value, hi: Value) -> Value[src]

Concatenate low and high bits to form a larger integer type.

Vectors of integers are concatenated lane-wise such that the result has the same number of lanes as the inputs, but the lanes are twice the size.

Inputs:

  • lo: An integer type with lanes up to i64
  • hi: An integer type with lanes up to i64

Outputs:

  • a: The concatenation of lo and hi
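
A minimal round-trip sketch, splitting an i64 into its i32 halves and concatenating them back, assuming a cranelift_frontend::FunctionBuilder named builder and an i64 value x:

    use cranelift_codegen::ir::Value;
    use cranelift_frontend::FunctionBuilder;

    fn split_and_rejoin(builder: &mut FunctionBuilder, x: Value) -> Value {
        // lo holds the low 32 bits, hi the high 32 bits.
        let (lo, hi) = builder.ins().isplit(x);
        // Reassembles the original 64-bit value.
        builder.ins().iconcat(lo, hi)
    }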

pub fn atomic_rmw<T1, T2>(
    self,
    AtomicMem: Type,
    MemFlags: T1,
    AtomicRmwOp: T2,
    p: Value,
    x: Value
) -> Value where
    T1: Into<MemFlags>,
    T2: Into<AtomicRmwOp>, 
[src]

Atomically read-modify-write memory at p, with second operand x. The old value is returned. p has the type of the target word size, and x may be an integer type of 8, 16, 32 or 64 bits, even on a 32-bit target. The type of the returned value is the same as the type of x. This operation is sequentially consistent and creates happens-before edges that order normal (non-atomic) loads and stores.

Inputs:

  • AtomicMem (controlling type variable): Any type that can be stored in memory, which can be used in an atomic operation
  • MemFlags: Memory operation flags
  • AtomicRmwOp: Atomic Read-Modify-Write Ops
  • p: An integer address type
  • x: Value to be atomically stored

Outputs:

  • a: Value atomically loaded

pub fn atomic_cas<T1>(self, MemFlags: T1, p: Value, e: Value, x: Value) -> Value where
    T1: Into<MemFlags>, 
[src]

Perform an atomic compare-and-swap operation on memory at p, with expected value e, storing x if the value at p equals e. The old value at p is returned, regardless of whether the operation succeeds or fails. p has the type of the target word size, and x and e must have the same type and the same size, which may be an integer type of 8, 16, 32 or 64 bits, even on a 32-bit target. The type of the returned value is the same as the type of x and e. This operation is sequentially consistent and creates happens-before edges that order normal (non-atomic) loads and stores.

Inputs:

  • MemFlags: Memory operation flags
  • p: An integer address type
  • e: Expected value in CAS
  • x: Value to be atomically stored

Outputs:

  • a: Value atomically loaded

pub fn atomic_load<T1>(self, AtomicMem: Type, MemFlags: T1, p: Value) -> Value where
    T1: Into<MemFlags>, 
[src]

Atomically load from memory at p.

This is a polymorphic instruction that can load any value type which has a memory representation. It should only be used for integer types with 8, 16, 32 or 64 bits. This operation is sequentially consistent and creates happens-before edges that order normal (non-atomic) loads and stores.

Inputs:

  • AtomicMem (controlling type variable): Any type that can be stored in memory, which can be used in an atomic operation
  • MemFlags: Memory operation flags
  • p: An integer address type

Outputs:

  • a: Value atomically loaded

pub fn atomic_store<T1>(self, MemFlags: T1, x: Value, p: Value) -> Inst where
    T1: Into<MemFlags>, 
[src]

Atomically store x to memory at p.

This is a polymorphic instruction that can store any value type with a memory representation. It should only be used for integer types with 8, 16, 32 or 64 bits. This operation is sequentially consistent and creates happens-before edges that order normal (non-atomic) loads and stores.

Inputs:

  • MemFlags: Memory operation flags
  • x: Value to be atomically stored
  • p: An integer address type

pub fn fence(self) -> Inst[src]

A memory fence. This must provide ordering to ensure that, at a minimum, neither loads nor stores of any kind may move forwards or backwards across the fence. This operation is sequentially consistent.
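
The sketch below strings the atomic instructions together on a single i64 memory cell: a fetch-add, a compare-and-swap, a plain atomic load/store and a fence. It assumes a cranelift_frontend::FunctionBuilder named builder, an address-sized integer value addr, and i64 values delta, expected and new_val.

    use cranelift_codegen::ir::{types, AtomicRmwOp, MemFlags, Value};
    use cranelift_frontend::FunctionBuilder;

    fn atomics_sketch(
        builder: &mut FunctionBuilder,
        addr: Value,
        delta: Value,
        expected: Value,
        new_val: Value,
    ) {
        let flags = MemFlags::new();
        // Atomically add delta to the i64 at addr; the old value is returned.
        let _old = builder.ins().atomic_rmw(types::I64, flags, AtomicRmwOp::Add, addr, delta);
        // Store new_val only if the current value equals expected; returns the old value.
        let _prev = builder.ins().atomic_cas(flags, addr, expected, new_val);
        // Sequentially consistent load and store.
        let loaded = builder.ins().atomic_load(types::I64, flags, addr);
        builder.ins().atomic_store(flags, loaded, addr);
        // Full two-way barrier.
        builder.ins().fence();
    }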

pub fn x86_udivmodx(self, nlo: Value, nhi: Value, d: Value) -> (Value, Value)[src]

Extended unsigned division.

Concatenate the bits in nhi and nlo to form the numerator. Interpret the bits as an unsigned number and divide by the unsigned denominator d. Trap when d is zero or if the quotient is larger than the range of the output.

Return both quotient and remainder.

Inputs:

  • nlo: Low part of numerator
  • nhi: High part of numerator
  • d: Denominator

Outputs:

  • q: Quotient
  • r: Remainder

pub fn x86_sdivmodx(self, nlo: Value, nhi: Value, d: Value) -> (Value, Value)[src]

Extended signed division.

Concatenate the bits in nhi and nlo to form the numerator. Interpret the bits as a signed number and divide by the signed denominator d. Trap when d is zero or if the quotient is outside the range of the output.

Return both quotient and remainder.

Inputs:

  • nlo: Low part of numerator
  • nhi: High part of numerator
  • d: Denominator

Outputs:

  • q: Quotient
  • r: Remainder

pub fn x86_umulx(self, argL: Value, argR: Value) -> (Value, Value)[src]

Unsigned integer multiplication, producing a double-length result.

Polymorphic over all scalar integer types, but does not support vector types.

Inputs:

  • argL: A scalar integer machine word
  • argR: A scalar integer machine word

Outputs:

  • resLo: A scalar integer machine word
  • resHi: A scalar integer machine word

pub fn x86_smulx(self, argL: Value, argR: Value) -> (Value, Value)[src]

Signed integer multiplication, producing a double-length result.

Polymorphic over all scalar integer types, but does not support vector types.

Inputs:

  • argL: A scalar integer machine word
  • argR: A scalar integer machine word

Outputs:

  • resLo: A scalar integer machine word
  • resHi: A scalar integer machine word

pub fn x86_cvtt2si(self, IntTo: Type, x: Value) -> Value[src]

Convert with truncation floating point to signed integer.

The source floating point operand is converted to a signed integer by rounding towards zero. If the result can’t be represented in the output type, returns the smallest signed value the output type can represent.

This instruction does not trap.

Inputs:

  • IntTo (controlling type variable): An integer type with the same number of lanes
  • x: A scalar or vector floating point number

Outputs:

  • a: An integer type with the same number of lanes

pub fn x86_vcvtudq2ps(self, x: Value) -> Value[src]

Convert unsigned integer to floating point.

Convert packed doubleword unsigned integers to packed single-precision floating-point values. This instruction does not trap.

Inputs:

  • x: An integer type with the same number of lanes

Outputs:

  • a: A floating point number

pub fn x86_fmin(self, x: Value, y: Value) -> Value[src]

Floating point minimum with x86 semantics.

This is equivalent to the C ternary operator x < y ? x : y which differs from fmin when either operand is NaN or when comparing +0.0 to -0.0.

When the two operands don’t compare as LT, y is returned unchanged, even if it is a signalling NaN.

Inputs:

  • x: A scalar or vector floating point number
  • y: A scalar or vector floating point number

Outputs:

  • a: A scalar or vector floating point number

pub fn x86_fmax(self, x: Value, y: Value) -> Value[src]

Floating point maximum with x86 semantics.

This is equivalent to the C ternary operator x > y ? x : y which differs from fmax when either operand is NaN or when comparing +0.0 to -0.0.

When the two operands don’t compare as GT, y is returned unchanged, even if it is a signalling NaN.

Inputs:

  • x: A scalar or vector floating point number
  • y: A scalar or vector floating point number

Outputs:

  • a: A scalar or vector floating point number

pub fn x86_push(self, x: Value) -> Inst[src]

Pushes a value onto the stack.

Decrements the stack pointer and stores the specified value onto the top of the stack.

This is polymorphic in i32 and i64. However, it is only implemented for i64 in 64-bit mode, and only for i32 in 32-bit mode.

Inputs:

  • x: A scalar integer machine word

pub fn x86_pop(self, iWord: Type) -> Value[src]

Pops a value from the stack.

Loads a value from the top of the stack and then increments the stack pointer.

This is polymorphic in i32 and i64. However, it is only implemented for i64 in 64-bit mode, and only for i32 in 32-bit mode.

Inputs:

  • iWord (controlling type variable): A scalar integer machine word

Outputs:

  • x: A scalar integer machine word

pub fn x86_bsr(self, x: Value) -> (Value, Value)[src]

Bit Scan Reverse – returns the bit-index of the most significant 1 in the word. Result is undefined if the argument is zero. However, it sets the Z flag depending on the argument, so it is at least easy to detect and handle that case.

This is polymorphic in i32 and i64. It is implemented for both i64 and i32 in 64-bit mode, and only for i32 in 32-bit mode.

Inputs:

  • x: A scalar integer machine word

Outputs:

  • y: A scalar integer machine word
  • rflags: CPU flags representing the result of an integer comparison. These flags can be tested with an intcc condition code.

pub fn x86_bsf(self, x: Value) -> (Value, Value)[src]

Bit Scan Forwards – returns the bit-index of the least significant 1 in the word. It is otherwise identical to x86_bsr, just above.

Inputs:

  • x: A scalar integer machine word

Outputs:

  • y: A scalar integer machine word
  • rflags: CPU flags representing the result of an integer comparison. These flags can be tested with an intcc condition code.

pub fn x86_pshufd<T1>(self, a: Value, i: T1) -> Value where
    T1: Into<u8>, 
[src]

Packed Shuffle Doublewords – copies data from either memory or lanes in an extended register and re-orders the data according to the passed immediate byte.

Inputs:

  • a: A vector value (i.e. held in an XMM register)
  • i: An ordering operand controlling the copying of data from the source to the destination; see PSHUFD in Intel manual for details

Outputs:

  • a: A vector value (i.e. held in an XMM register)

pub fn x86_pshufb(self, a: Value, b: Value) -> Value[src]

Packed Shuffle Bytes – re-orders data in an extended register using a shuffle mask from either memory or another extended register.

Inputs:

  • a: A vector value (i.e. held in an XMM register)
  • b: A vector value (i.e. held in an XMM register)

Outputs:

  • a: A vector value (i.e. held in an XMM register)

pub fn x86_pblendw<T1>(self, a: Value, b: Value, mask: T1) -> Value where
    T1: Into<u8>, 
[src]

Blend packed words using an immediate mask. Each bit of the 8-bit immediate corresponds to a lane in b: if the bit is set, the lane is copied into a.

Inputs:

  • a: A vector value (i.e. held in an XMM register)
  • b: A vector value (i.e. held in an XMM register)
  • mask: mask to select lanes from b

Outputs:

  • a: A vector value (i.e. held in an XMM register)

pub fn x86_pextr<T1>(self, x: Value, Idx: T1) -> Value where
    T1: Into<u8>, 
[src]

Extract lane Idx from x. The lane index, Idx, is an immediate value, not an SSA value. It must indicate a valid lane index for the type of x.

Inputs:

  • x: A SIMD vector type
  • Idx: Lane index

Outputs:

  • a:

pub fn x86_pinsr<T1>(self, x: Value, y: Value, Idx: T1) -> Value where
    T1: Into<u8>, 
[src]

Insert y into x at lane Idx. The lane index, Idx, is an immediate value, not an SSA value. It must indicate a valid lane index for the type of x.

Inputs:

  • x: A SIMD vector type containing only booleans and integers
  • y: New lane value
  • Idx: Lane index

Outputs:

  • a: A SIMD vector type containing only booleans and integers

pub fn x86_insertps<T1>(self, x: Value, y: Value, Idx: T1) -> Value where
    T1: Into<u8>, 
[src]

Insert a lane of y into x, using Idx to encode both which lane the value is extracted from and which lane it is inserted into. This is similar to x86_pinsr but inserts floats, which are already stored in an XMM register.

Inputs:

  • x: A SIMD vector type containing floats
  • y: New lane value
  • Idx: Lane index

Outputs:

  • a: A SIMD vector type containing floats

pub fn x86_punpckh(self, x: Value, y: Value) -> Value[src]

Unpack the high-order lanes of x and y and interleave into a. With notional i8x4 vectors, where x = [x3, x2, x1, x0] and y = [y3, y2, y1, y0], this operation would result in a = [y3, x3, y2, x2] (using the Intel manual’s right-to-left lane ordering).

Inputs:

  • x: A SIMD vector type
  • y: A SIMD vector type

Outputs:

  • a: A SIMD vector type

pub fn x86_punpckl(self, x: Value, y: Value) -> Value[src]

Unpack the low-order lanes of x and y and interleave into a. With notional i8x4 vectors, where x = [x3, x2, x1, x0] and y = [y3, y2, y1, y0], this operation would result in a = [y1, x1, y0, x0] (using the Intel manual’s right-to-left lane ordering).

Inputs:

  • x: A SIMD vector type
  • y: A SIMD vector type

Outputs:

  • a: A SIMD vector type

pub fn x86_movsd(self, x: Value, y: Value) -> Value[src]

Move the low 64 bits of the float vector y to the low 64 bits of float vector x.

Inputs:

  • x: A SIMD vector type containing floats
  • y: A SIMD vector type containing floats

Outputs:

  • a: A SIMD vector type containing floats

pub fn x86_movlhps(self, x: Value, y: Value) -> Value[src]

Move the low 64 bits of the float vector y to the high 64 bits of float vector x.

Inputs:

  • x: A SIMD vector type containing floats
  • y: A SIMD vector type containing floats

Outputs:

  • a: A SIMD vector type containing floats

pub fn x86_psll(self, x: Value, y: Value) -> Value[src]

Shift Packed Data Left Logical – This implements the behavior of the shared instruction ishl but alters the shift operand to live in an XMM register as expected by the PSLL* family of instructions.

Inputs:

  • x: Vector value to shift
  • y: Number of bits to shift

Outputs:

  • a: A SIMD vector type containing integers

pub fn x86_psrl(self, x: Value, y: Value) -> Value[src]

Shift Packed Data Right Logical – This implements the behavior of the shared instruction ushr but alters the shift operand to live in an XMM register as expected by the PSRL* family of instructions.

Inputs:

  • x: Vector value to shift
  • y: Number of bits to shift

Outputs:

  • a: A SIMD vector type containing integers

pub fn x86_psra(self, x: Value, y: Value) -> Value[src]

Shift Packed Data Right Arithmetic – This implements the behavior of the shared instruction sshr but alters the shift operand to live in an XMM register as expected by the PSRA* family of instructions.

Inputs:

  • x: Vector value to shift
  • y: Number of bits to shift

Outputs:

  • a: A SIMD vector type containing integers

pub fn x86_pmullq(self, x: Value, y: Value) -> Value[src]

Multiply Packed Integers – Multiply two 64x2 integers and receive a 64x2 result with lane-wise wrapping if the result overflows. This instruction is necessary to add distinct encodings for CPUs with newer vector features.

Inputs:

  • x: A SIMD vector type containing two 64-bit integers
  • y: A SIMD vector type containing two 64-bit integers

Outputs:

  • a: A SIMD vector type containing two 64-bit integers

pub fn x86_pmuludq(self, x: Value, y: Value) -> Value[src]

Multiply Packed Integers – Using only the bottom 32 bits in each lane, multiply two 64x2 unsigned integers and receive a 64x2 result. This instruction avoids the need for handling overflow as in x86_pmullq.

Inputs:

  • x: A SIMD vector type containing two 64-bit integers
  • y: A SIMD vector type containing two 64-bit integers

Outputs:

  • a: A SIMD vector type containing two 64-bit integers

pub fn x86_ptest(self, x: Value, y: Value) -> Value[src]

Logical Compare – PTEST sets the ZF flag if all bits in the bitwise AND of the first source operand and the second source operand are 0. It sets the CF flag if all bits in the bitwise AND of the second source operand and the logical NOT of the first operand are 0.

Inputs:

  • x: A SIMD vector type
  • y: A SIMD vector type

Outputs:

  • f: CPU flags representing the result of an integer comparison. These flags can be tested with an intcc condition code.

pub fn x86_pmaxs(self, x: Value, y: Value) -> Value[src]

Maximum of Packed Signed Integers – Compare signed integers in the first and second operand and return the maximum values.

Inputs:

  • x: A SIMD vector type containing integers
  • y: A SIMD vector type containing integers

Outputs:

  • a: A SIMD vector type containing integers

pub fn x86_pmaxu(self, x: Value, y: Value) -> Value[src]

Maximum of Packed Unsigned Integers – Compare unsigned integers in the first and second operand and return the maximum values.

Inputs:

  • x: A SIMD vector type containing integers
  • y: A SIMD vector type containing integers

Outputs:

  • a: A SIMD vector type containing integers

pub fn x86_pmins(self, x: Value, y: Value) -> Value[src]

Minimum of Packed Signed Integers – Compare signed integers in the first and second operand and return the minimum values.

Inputs:

  • x: A SIMD vector type containing integers
  • y: A SIMD vector type containing integers

Outputs:

  • a: A SIMD vector type containing integers

pub fn x86_pminu(self, x: Value, y: Value) -> Value[src]

Minimum of Packed Unsigned Integers – Compare unsigned integers in the first and second operand and return the minimum values.

Inputs:

  • x: A SIMD vector type containing integers
  • y: A SIMD vector type containing integers

Outputs:

  • a: A SIMD vector type containing integers

pub fn x86_palignr<T1>(self, x: Value, y: Value, c: T1) -> Value where
    T1: Into<u8>, 
[src]

Concatenate destination and source operands, extracting a byte-aligned result shifted to the right by c.

Inputs:

  • x: A SIMD vector type containing integers
  • y: A SIMD vector type containing integers
  • c: The number of bytes to shift right; see PALIGNR in Intel manual for details

Outputs:

  • a: A SIMD vector type containing integers

pub fn x86_elf_tls_get_addr(self, GV: GlobalValue) -> Value[src]

ELF TLS get addr – this implements the general-dynamic (GD) TLS model for ELF. The clobber output should not be used.

Inputs:

  • GV: A global value.

Outputs:

  • addr: A scalar 64-bit integer

pub fn x86_macho_tls_get_addr(self, GV: GlobalValue) -> Value[src]

Mach-O TLS get addr – this implements TLS access for Mach-O. The clobber output should not be used.

Inputs:

  • GV: A global value.

Outputs:

  • addr: A scalar 64-bit integer

pub fn AtomicCas(
    self,
    opcode: Opcode,
    ctrl_typevar: Type,
    flags: MemFlags,
    arg0: Value,
    arg1: Value,
    arg2: Value
) -> (Inst, &'f mut DataFlowGraph)
[src]

AtomicCas(imms=(flags: ir::MemFlags), vals=3)

pub fn AtomicRmw(
    self,
    opcode: Opcode,
    ctrl_typevar: Type,
    flags: MemFlags,
    op: AtomicRmwOp,
    arg0: Value,
    arg1: Value
) -> (Inst, &'f mut DataFlowGraph)
[src]

AtomicRmw(imms=(flags: ir::MemFlags, op: ir::AtomicRmwOp), vals=2)

pub fn Binary(
    self,
    opcode: Opcode,
    ctrl_typevar: Type,
    arg0: Value,
    arg1: Value
) -> (Inst, &'f mut DataFlowGraph)
[src]

Binary(imms=(), vals=2)

pub fn BinaryImm64(
    self,
    opcode: Opcode,
    ctrl_typevar: Type,
    imm: Imm64,
    arg0: Value
) -> (Inst, &'f mut DataFlowGraph)
[src]

BinaryImm64(imms=(imm: ir::immediates::Imm64), vals=1)

pub fn BinaryImm8(
    self,
    opcode: Opcode,
    ctrl_typevar: Type,
    imm: u8,
    arg0: Value
) -> (Inst, &'f mut DataFlowGraph)
[src]

BinaryImm8(imms=(imm: ir::immediates::Uimm8), vals=1)

pub fn Branch(
    self,
    opcode: Opcode,
    ctrl_typevar: Type,
    destination: Block,
    args: EntityList<Value>
) -> (Inst, &'f mut DataFlowGraph)
[src]

Branch(imms=(destination: ir::Block), vals=1)

pub fn BranchFloat(
    self,
    opcode: Opcode,
    ctrl_typevar: Type,
    cond: FloatCC,
    destination: Block,
    args: EntityList<Value>
) -> (Inst, &'f mut DataFlowGraph)
[src]

BranchFloat(imms=(cond: ir::condcodes::FloatCC, destination: ir::Block), vals=1)

pub fn BranchIcmp(
    self,
    opcode: Opcode,
    ctrl_typevar: Type,
    cond: IntCC,
    destination: Block,
    args: EntityList<Value>
) -> (Inst, &'f mut DataFlowGraph)
[src]

BranchIcmp(imms=(cond: ir::condcodes::IntCC, destination: ir::Block), vals=2)

pub fn BranchInt(
    self,
    opcode: Opcode,
    ctrl_typevar: Type,
    cond: IntCC,
    destination: Block,
    args: EntityList<Value>
) -> (Inst, &'f mut DataFlowGraph)
[src]

BranchInt(imms=(cond: ir::condcodes::IntCC, destination: ir::Block), vals=1)

pub fn BranchTable(
    self,
    opcode: Opcode,
    ctrl_typevar: Type,
    destination: Block,
    table: JumpTable,
    arg0: Value
) -> (Inst, &'f mut DataFlowGraph)
[src]

BranchTable(imms=(destination: ir::Block, table: ir::JumpTable), vals=1)

pub fn BranchTableBase(
    self,
    opcode: Opcode,
    ctrl_typevar: Type,
    table: JumpTable
) -> (Inst, &'f mut DataFlowGraph)
[src]

BranchTableBase(imms=(table: ir::JumpTable), vals=0)

pub fn BranchTableEntry(
    self,
    opcode: Opcode,
    ctrl_typevar: Type,
    imm: u8,
    table: JumpTable,
    arg0: Value,
    arg1: Value
) -> (Inst, &'f mut DataFlowGraph)
[src]

BranchTableEntry(imms=(imm: ir::immediates::Uimm8, table: ir::JumpTable), vals=2)

pub fn Call(
    self,
    opcode: Opcode,
    ctrl_typevar: Type,
    func_ref: FuncRef,
    args: EntityList<Value>
) -> (Inst, &'f mut DataFlowGraph)
[src]

Call(imms=(func_ref: ir::FuncRef), vals=0)

pub fn CallIndirect(
    self,
    opcode: Opcode,
    ctrl_typevar: Type,
    sig_ref: SigRef,
    args: EntityList<Value>
) -> (Inst, &'f mut DataFlowGraph)
[src]

CallIndirect(imms=(sig_ref: ir::SigRef), vals=1)

pub fn CondTrap(
    self,
    opcode: Opcode,
    ctrl_typevar: Type,
    code: TrapCode,
    arg0: Value
) -> (Inst, &'f mut DataFlowGraph)
[src]

CondTrap(imms=(code: ir::TrapCode), vals=1)

pub fn CopySpecial(
    self,
    opcode: Opcode,
    ctrl_typevar: Type,
    src: u16,
    dst: u16
) -> (Inst, &'f mut DataFlowGraph)
[src]

CopySpecial(imms=(src: isa::RegUnit, dst: isa::RegUnit), vals=0)

pub fn CopyToSsa(
    self,
    opcode: Opcode,
    ctrl_typevar: Type,
    src: u16
) -> (Inst, &'f mut DataFlowGraph)
[src]

CopyToSsa(imms=(src: isa::RegUnit), vals=0)

pub fn FloatCompare(
    self,
    opcode: Opcode,
    ctrl_typevar: Type,
    cond: FloatCC,
    arg0: Value,
    arg1: Value
) -> (Inst, &'f mut DataFlowGraph)
[src]

FloatCompare(imms=(cond: ir::condcodes::FloatCC), vals=2)

pub fn FloatCond(
    self,
    opcode: Opcode,
    ctrl_typevar: Type,
    cond: FloatCC,
    arg0: Value
) -> (Inst, &'f mut DataFlowGraph)
[src]

FloatCond(imms=(cond: ir::condcodes::FloatCC), vals=1)

pub fn FloatCondTrap(
    self,
    opcode: Opcode,
    ctrl_typevar: Type,
    cond: FloatCC,
    code: TrapCode,
    arg0: Value
) -> (Inst, &'f mut DataFlowGraph)
[src]

FloatCondTrap(imms=(cond: ir::condcodes::FloatCC, code: ir::TrapCode), vals=1)

pub fn FuncAddr(
    self,
    opcode: Opcode,
    ctrl_typevar: Type,
    func_ref: FuncRef
) -> (Inst, &'f mut DataFlowGraph)
[src]

FuncAddr(imms=(func_ref: ir::FuncRef), vals=0)

pub fn HeapAddr(
    self,
    opcode: Opcode,
    ctrl_typevar: Type,
    heap: Heap,
    imm: Uimm32,
    arg0: Value
) -> (Inst, &'f mut DataFlowGraph)
[src]

HeapAddr(imms=(heap: ir::Heap, imm: ir::immediates::Uimm32), vals=1)

pub fn IndirectJump(
    self,
    opcode: Opcode,
    ctrl_typevar: Type,
    table: JumpTable,
    arg0: Value
) -> (Inst, &'f mut DataFlowGraph)
[src]

IndirectJump(imms=(table: ir::JumpTable), vals=1)

pub fn IntCompare(
    self,
    opcode: Opcode,
    ctrl_typevar: Type,
    cond: IntCC,
    arg0: Value,
    arg1: Value
) -> (Inst, &'f mut DataFlowGraph)
[src]

IntCompare(imms=(cond: ir::condcodes::IntCC), vals=2)

pub fn IntCompareImm(
    self,
    opcode: Opcode,
    ctrl_typevar: Type,
    cond: IntCC,
    imm: Imm64,
    arg0: Value
) -> (Inst, &'f mut DataFlowGraph)
[src]

IntCompareImm(imms=(cond: ir::condcodes::IntCC, imm: ir::immediates::Imm64), vals=1)

pub fn IntCond(
    self,
    opcode: Opcode,
    ctrl_typevar: Type,
    cond: IntCC,
    arg0: Value
) -> (Inst, &'f mut DataFlowGraph)
[src]

IntCond(imms=(cond: ir::condcodes::IntCC), vals=1)

pub fn IntCondTrap(
    self,
    opcode: Opcode,
    ctrl_typevar: Type,
    cond: IntCC,
    code: TrapCode,
    arg0: Value
) -> (Inst, &'f mut DataFlowGraph)
[src]

IntCondTrap(imms=(cond: ir::condcodes::IntCC, code: ir::TrapCode), vals=1)

pub fn IntSelect(
    self,
    opcode: Opcode,
    ctrl_typevar: Type,
    cond: IntCC,
    arg0: Value,
    arg1: Value,
    arg2: Value
) -> (Inst, &'f mut DataFlowGraph)
[src]

IntSelect(imms=(cond: ir::condcodes::IntCC), vals=3)

pub fn Jump(
    self,
    opcode: Opcode,
    ctrl_typevar: Type,
    destination: Block,
    args: EntityList<Value>
) -> (Inst, &'f mut DataFlowGraph)
[src]

Jump(imms=(destination: ir::Block), vals=0)

pub fn Load(
    self,
    opcode: Opcode,
    ctrl_typevar: Type,
    flags: MemFlags,
    offset: Offset32,
    arg0: Value
) -> (Inst, &'f mut DataFlowGraph)
[src]

Load(imms=(flags: ir::MemFlags, offset: ir::immediates::Offset32), vals=1)

pub fn LoadComplex(
    self,
    opcode: Opcode,
    ctrl_typevar: Type,
    flags: MemFlags,
    offset: Offset32,
    args: EntityList<Value>
) -> (Inst, &'f mut DataFlowGraph)
[src]

LoadComplex(imms=(flags: ir::MemFlags, offset: ir::immediates::Offset32), vals=0)

pub fn LoadNoOffset(
    self,
    opcode: Opcode,
    ctrl_typevar: Type,
    flags: MemFlags,
    arg0: Value
) -> (Inst, &'f mut DataFlowGraph)
[src]

LoadNoOffset(imms=(flags: ir::MemFlags), vals=1)

pub fn MultiAry(
    self,
    opcode: Opcode,
    ctrl_typevar: Type,
    args: EntityList<Value>
) -> (Inst, &'f mut DataFlowGraph)
[src]

MultiAry(imms=(), vals=0)

pub fn NullAry(
    self,
    opcode: Opcode,
    ctrl_typevar: Type
) -> (Inst, &'f mut DataFlowGraph)
[src]

NullAry(imms=(), vals=0)

pub fn RegFill(
    self,
    opcode: Opcode,
    ctrl_typevar: Type,
    src: StackSlot,
    dst: u16,
    arg0: Value
) -> (Inst, &'f mut DataFlowGraph)
[src]

RegFill(imms=(src: ir::StackSlot, dst: isa::RegUnit), vals=1)

pub fn RegMove(
    self,
    opcode: Opcode,
    ctrl_typevar: Type,
    src: u16,
    dst: u16,
    arg0: Value
) -> (Inst, &'f mut DataFlowGraph)
[src]

RegMove(imms=(src: isa::RegUnit, dst: isa::RegUnit), vals=1)

pub fn RegSpill(
    self,
    opcode: Opcode,
    ctrl_typevar: Type,
    src: u16,
    dst: StackSlot,
    arg0: Value
) -> (Inst, &'f mut DataFlowGraph)
[src]

RegSpill(imms=(src: isa::RegUnit, dst: ir::StackSlot), vals=1)

pub fn Shuffle(
    self,
    opcode: Opcode,
    ctrl_typevar: Type,
    mask: Immediate,
    arg0: Value,
    arg1: Value
) -> (Inst, &'f mut DataFlowGraph)
[src]

Shuffle(imms=(mask: ir::Immediate), vals=2)

pub fn StackLoad(
    self,
    opcode: Opcode,
    ctrl_typevar: Type,
    stack_slot: StackSlot,
    offset: Offset32
) -> (Inst, &'f mut DataFlowGraph)
[src]

StackLoad(imms=(stack_slot: ir::StackSlot, offset: ir::immediates::Offset32), vals=0)

pub fn StackStore(
    self,
    opcode: Opcode,
    ctrl_typevar: Type,
    stack_slot: StackSlot,
    offset: Offset32,
    arg0: Value
) -> (Inst, &'f mut DataFlowGraph)
[src]

StackStore(imms=(stack_slot: ir::StackSlot, offset: ir::immediates::Offset32), vals=1)

pub fn Store(
    self,
    opcode: Opcode,
    ctrl_typevar: Type,
    flags: MemFlags,
    offset: Offset32,
    arg0: Value,
    arg1: Value
) -> (Inst, &'f mut DataFlowGraph)
[src]

Store(imms=(flags: ir::MemFlags, offset: ir::immediates::Offset32), vals=2)

pub fn StoreComplex(
    self,
    opcode: Opcode,
    ctrl_typevar: Type,
    flags: MemFlags,
    offset: Offset32,
    args: EntityList<Value>
) -> (Inst, &'f mut DataFlowGraph)
[src]

StoreComplex(imms=(flags: ir::MemFlags, offset: ir::immediates::Offset32), vals=1)

pub fn StoreNoOffset(
    self,
    opcode: Opcode,
    ctrl_typevar: Type,
    flags: MemFlags,
    arg0: Value,
    arg1: Value
) -> (Inst, &'f mut DataFlowGraph)
[src]

StoreNoOffset(imms=(flags: ir::MemFlags), vals=2)

pub fn TableAddr(
    self,
    opcode: Opcode,
    ctrl_typevar: Type,
    table: Table,
    offset: Offset32,
    arg0: Value
) -> (Inst, &'f mut DataFlowGraph)
[src]

TableAddr(imms=(table: ir::Table, offset: ir::immediates::Offset32), vals=1)

pub fn Ternary(
    self,
    opcode: Opcode,
    ctrl_typevar: Type,
    arg0: Value,
    arg1: Value,
    arg2: Value
) -> (Inst, &'f mut DataFlowGraph)
[src]

Ternary(imms=(), vals=3)

pub fn TernaryImm8(
    self,
    opcode: Opcode,
    ctrl_typevar: Type,
    imm: u8,
    arg0: Value,
    arg1: Value
) -> (Inst, &'f mut DataFlowGraph)
[src]

TernaryImm8(imms=(imm: ir::immediates::Uimm8), vals=2)

pub fn Trap(
    self,
    opcode: Opcode,
    ctrl_typevar: Type,
    code: TrapCode
) -> (Inst, &'f mut DataFlowGraph)
[src]

Trap(imms=(code: ir::TrapCode), vals=0)

pub fn Unary(
    self,
    opcode: Opcode,
    ctrl_typevar: Type,
    arg0: Value
) -> (Inst, &'f mut DataFlowGraph)
[src]

Unary(imms=(), vals=1)

pub fn UnaryBool(
    self,
    opcode: Opcode,
    ctrl_typevar: Type,
    imm: bool
) -> (Inst, &'f mut DataFlowGraph)
[src]

UnaryBool(imms=(imm: bool), vals=0)

pub fn UnaryConst(
    self,
    opcode: Opcode,
    ctrl_typevar: Type,
    constant_handle: Constant
) -> (Inst, &'f mut DataFlowGraph)
[src]

UnaryConst(imms=(constant_handle: ir::Constant), vals=0)

pub fn UnaryGlobalValue(
    self,
    opcode: Opcode,
    ctrl_typevar: Type,
    global_value: GlobalValue
) -> (Inst, &'f mut DataFlowGraph)
[src]

UnaryGlobalValue(imms=(global_value: ir::GlobalValue), vals=0)

pub fn UnaryIeee32(
    self,
    opcode: Opcode,
    ctrl_typevar: Type,
    imm: Ieee32
) -> (Inst, &'f mut DataFlowGraph)
[src]

UnaryIeee32(imms=(imm: ir::immediates::Ieee32), vals=0)

pub fn UnaryIeee64(
    self,
    opcode: Opcode,
    ctrl_typevar: Type,
    imm: Ieee64
) -> (Inst, &'f mut DataFlowGraph)
[src]

UnaryIeee64(imms=(imm: ir::immediates::Ieee64), vals=0)

pub fn UnaryImm(
    self,
    opcode: Opcode,
    ctrl_typevar: Type,
    imm: Imm64
) -> (Inst, &'f mut DataFlowGraph)
[src]

UnaryImm(imms=(imm: ir::immediates::Imm64), vals=0)

Implementors

impl<'f, T> InstBuilder<'f> for T where
    T: InstBuilderBase<'f>, 
[src]

Any type implementing InstBuilderBase gets all the InstBuilder methods for free.
